linux/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
   1// SPDX-License-Identifier:  GPL-2.0
   2// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
   3
   4/*
   5 * Synopsys DesignWare AXI DMA Controller driver.
   6 *
   7 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
   8 */
   9
  10#include <linux/bitops.h>
  11#include <linux/delay.h>
  12#include <linux/device.h>
  13#include <linux/dmaengine.h>
  14#include <linux/dmapool.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/err.h>
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/iopoll.h>
  20#include <linux/io-64-nonatomic-lo-hi.h>
  21#include <linux/kernel.h>
  22#include <linux/module.h>
  23#include <linux/of.h>
  24#include <linux/of_dma.h>
  25#include <linux/platform_device.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/property.h>
  28#include <linux/slab.h>
  29#include <linux/types.h>
  30
  31#include "dw-axi-dmac.h"
  32#include "../dmaengine.h"
  33#include "../virt-dma.h"
  34
  35/*
  36 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
  37 * master data bus width up to 512 bits (for both AXI master interfaces), but
   38 * it depends on IP block configuration.
  39 */
  40#define AXI_DMA_BUSWIDTHS                 \
  41        (DMA_SLAVE_BUSWIDTH_1_BYTE      | \
  42        DMA_SLAVE_BUSWIDTH_2_BYTES      | \
  43        DMA_SLAVE_BUSWIDTH_4_BYTES      | \
  44        DMA_SLAVE_BUSWIDTH_8_BYTES      | \
  45        DMA_SLAVE_BUSWIDTH_16_BYTES     | \
  46        DMA_SLAVE_BUSWIDTH_32_BYTES     | \
  47        DMA_SLAVE_BUSWIDTH_64_BYTES)
  48
  49static inline void
  50axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
  51{
  52        iowrite32(val, chip->regs + reg);
  53}
  54
  55static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
  56{
  57        return ioread32(chip->regs + reg);
  58}
  59
  60static inline void
  61axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
  62{
  63        iowrite32(val, chan->chan_regs + reg);
  64}
  65
  66static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
  67{
  68        return ioread32(chan->chan_regs + reg);
  69}
  70
  71static inline void
  72axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
  73{
  74        /*
   75         * We split one 64-bit write into two 32-bit writes as some HW doesn't
   76         * support 64-bit access.
  77         */
  78        iowrite32(lower_32_bits(val), chan->chan_regs + reg);
  79        iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
  80}
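
/*
 * Illustrative effect of the split above: writing the 64-bit link pointer
 * 0x0000000123456780 to CH_LLP turns into
 *
 *     iowrite32(0x23456780, chan->chan_regs + CH_LLP);     - lower 32 bits
 *     iowrite32(0x00000001, chan->chan_regs + CH_LLP + 4); - upper 32 bits
 *
 * so the register pair is always updated low word first, then high word.
 */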
  81
  82static inline void axi_dma_disable(struct axi_dma_chip *chip)
  83{
  84        u32 val;
  85
  86        val = axi_dma_ioread32(chip, DMAC_CFG);
  87        val &= ~DMAC_EN_MASK;
  88        axi_dma_iowrite32(chip, DMAC_CFG, val);
  89}
  90
  91static inline void axi_dma_enable(struct axi_dma_chip *chip)
  92{
  93        u32 val;
  94
  95        val = axi_dma_ioread32(chip, DMAC_CFG);
  96        val |= DMAC_EN_MASK;
  97        axi_dma_iowrite32(chip, DMAC_CFG, val);
  98}
  99
 100static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
 101{
 102        u32 val;
 103
 104        val = axi_dma_ioread32(chip, DMAC_CFG);
 105        val &= ~INT_EN_MASK;
 106        axi_dma_iowrite32(chip, DMAC_CFG, val);
 107}
 108
 109static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
 110{
 111        u32 val;
 112
 113        val = axi_dma_ioread32(chip, DMAC_CFG);
 114        val |= INT_EN_MASK;
 115        axi_dma_iowrite32(chip, DMAC_CFG, val);
 116}
 117
 118static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
 119{
 120        u32 val;
 121
 122        if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
 123                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
 124        } else {
 125                val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
 126                val &= ~irq_mask;
 127                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
 128        }
 129}
 130
 131static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
 132{
 133        axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
 134}
 135
 136static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
 137{
 138        axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
 139}
 140
 141static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
 142{
 143        axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
 144}
 145
 146static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
 147{
 148        return axi_chan_ioread32(chan, CH_INTSTATUS);
 149}
 150
 151static inline void axi_chan_disable(struct axi_dma_chan *chan)
 152{
 153        u32 val;
 154
 155        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 156        val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
 157        val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
 158        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 159}
 160
 161static inline void axi_chan_enable(struct axi_dma_chan *chan)
 162{
 163        u32 val;
 164
 165        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 166        val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
 167               BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
 168        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 169}
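
/*
 * DMAC_CHEN pairs every channel-enable bit with a write-enable bit: the
 * hardware only latches CH_EN bits whose CH_EN_WE counterpart is set in the
 * same write (shift values come from dw-axi-dmac.h). Sketch for channel 2:
 *
 *     enable:  val |= BIT(2) << DMAC_CHAN_EN_SHIFT |
 *                     BIT(2) << DMAC_CHAN_EN_WE_SHIFT;
 *     disable: val &= ~(BIT(2) << DMAC_CHAN_EN_SHIFT);
 *              val |=   BIT(2) << DMAC_CHAN_EN_WE_SHIFT;
 *
 * Other channels are untouched because their write-enable bits stay clear.
 */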
 170
 171static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
 172{
 173        u32 val;
 174
 175        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 176
 177        return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
 178}
 179
 180static void axi_dma_hw_init(struct axi_dma_chip *chip)
 181{
 182        u32 i;
 183
 184        for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
 185                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
 186                axi_chan_disable(&chip->dw->chan[i]);
 187        }
 188}
 189
 190static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
 191                                   dma_addr_t dst, size_t len)
 192{
 193        u32 max_width = chan->chip->dw->hdata->m_data_width;
 194
 195        return __ffs(src | dst | len | BIT(max_width));
 196}
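
/*
 * The returned width index is the position of the lowest bit set across the
 * source address, destination address, length and the bus-width cap, i.e.
 * the largest power-of-two alignment they all share. Worked example with
 * m_data_width = 3 (64-bit master bus):
 *
 *     src = 0x1000, dst = 0x2004, len = 0x40
 *     __ffs(0x1000 | 0x2004 | 0x40 | BIT(3)) = 2  ->  32-bit transfers
 *
 * The BIT(max_width) term guarantees the result never exceeds the bus width.
 */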
 197
 198static inline const char *axi_chan_name(struct axi_dma_chan *chan)
 199{
 200        return dma_chan_name(&chan->vc.chan);
 201}
 202
 203static struct axi_dma_desc *axi_desc_alloc(u32 num)
 204{
 205        struct axi_dma_desc *desc;
 206
 207        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
 208        if (!desc)
 209                return NULL;
 210
 211        desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
 212        if (!desc->hw_desc) {
 213                kfree(desc);
 214                return NULL;
 215        }
 216
 217        return desc;
 218}
 219
 220static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
 221                                        dma_addr_t *addr)
 222{
 223        struct axi_dma_lli *lli;
 224        dma_addr_t phys;
 225
 226        lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
 227        if (unlikely(!lli)) {
 228                dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
 229                        axi_chan_name(chan));
 230                return NULL;
 231        }
 232
 233        atomic_inc(&chan->descs_allocated);
 234        *addr = phys;
 235
 236        return lli;
 237}
 238
 239static void axi_desc_put(struct axi_dma_desc *desc)
 240{
 241        struct axi_dma_chan *chan = desc->chan;
 242        int count = atomic_read(&chan->descs_allocated);
 243        struct axi_dma_hw_desc *hw_desc;
 244        int descs_put;
 245
 246        for (descs_put = 0; descs_put < count; descs_put++) {
 247                hw_desc = &desc->hw_desc[descs_put];
 248                dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
 249        }
 250
 251        kfree(desc->hw_desc);
 252        kfree(desc);
 253        atomic_sub(descs_put, &chan->descs_allocated);
 254        dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
 255                axi_chan_name(chan), descs_put,
 256                atomic_read(&chan->descs_allocated));
 257}
 258
 259static void vchan_desc_put(struct virt_dma_desc *vdesc)
 260{
 261        axi_desc_put(vd_to_axi_desc(vdesc));
 262}
 263
 264static enum dma_status
 265dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
 266                  struct dma_tx_state *txstate)
 267{
 268        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 269        struct virt_dma_desc *vdesc;
 270        enum dma_status status;
 271        u32 completed_length;
 272        unsigned long flags;
 273        u32 completed_blocks;
 274        size_t bytes = 0;
 275        u32 length;
 276        u32 len;
 277
 278        status = dma_cookie_status(dchan, cookie, txstate);
 279        if (status == DMA_COMPLETE || !txstate)
 280                return status;
 281
 282        spin_lock_irqsave(&chan->vc.lock, flags);
 283
 284        vdesc = vchan_find_desc(&chan->vc, cookie);
 285        if (vdesc) {
 286                length = vd_to_axi_desc(vdesc)->length;
 287                completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
 288                len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
 289                completed_length = completed_blocks * len;
 290                bytes = length - completed_length;
 291        } else {
  292                bytes = 0;      /* descriptor not on the issued list */
 293        }
 294
 295        spin_unlock_irqrestore(&chan->vc.lock, flags);
 296        dma_set_residue(txstate, bytes);
 297
 298        return status;
 299}
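
/*
 * Residue arithmetic used above, assuming all blocks of a descriptor share
 * the size of hw_desc[0]: with length = 8192, hw_desc[0].len = 2048 and
 * completed_blocks = 2,
 *
 *     residue = 8192 - 2 * 2048 = 4096 bytes
 *
 * i.e. the part of the transfer that has not been moved yet.
 */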
 300
 301static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
 302{
 303        desc->lli->llp = cpu_to_le64(adr);
 304}
 305
 306static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
 307{
 308        axi_chan_iowrite64(chan, CH_LLP, adr);
 309}
 310
 311static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
 312{
 313        u32 offset = DMAC_APB_BYTE_WR_CH_EN;
 314        u32 reg_width, val;
 315
 316        if (!chan->chip->apb_regs) {
 317                dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
 318                return;
 319        }
 320
 321        reg_width = __ffs(chan->config.dst_addr_width);
 322        if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
 323                offset = DMAC_APB_HALFWORD_WR_CH_EN;
 324
 325        val = ioread32(chan->chip->apb_regs + offset);
 326
 327        if (set)
 328                val |= BIT(chan->id);
 329        else
 330                val &= ~BIT(chan->id);
 331
 332        iowrite32(val, chan->chip->apb_regs + offset);
 333}
 334/* Called in chan locked context */
 335static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 336                                      struct axi_dma_desc *first)
 337{
 338        u32 priority = chan->chip->dw->hdata->priority[chan->id];
 339        u32 reg, irq_mask;
 340        u8 lms = 0; /* Select AXI0 master for LLI fetching */
 341
 342        if (unlikely(axi_chan_is_hw_enable(chan))) {
 343                dev_err(chan2dev(chan), "%s is non-idle!\n",
 344                        axi_chan_name(chan));
 345
 346                return;
 347        }
 348
 349        axi_dma_enable(chan->chip);
 350
 351        reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
 352               DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
 353        axi_chan_iowrite32(chan, CH_CFG_L, reg);
 354
 355        reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
 356               priority << CH_CFG_H_PRIORITY_POS |
 357               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
 358               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
 359        switch (chan->direction) {
 360        case DMA_MEM_TO_DEV:
 361                dw_axi_dma_set_byte_halfword(chan, true);
 362                reg |= (chan->config.device_fc ?
 363                        DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
 364                        DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
 365                        << CH_CFG_H_TT_FC_POS;
 366                if (chan->chip->apb_regs)
 367                        reg |= (chan->id << CH_CFG_H_DST_PER_POS);
 368                break;
 369        case DMA_DEV_TO_MEM:
 370                reg |= (chan->config.device_fc ?
 371                        DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
 372                        DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
 373                        << CH_CFG_H_TT_FC_POS;
 374                if (chan->chip->apb_regs)
 375                        reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
 376                break;
 377        default:
 378                break;
 379        }
 380        axi_chan_iowrite32(chan, CH_CFG_H, reg);
 381
 382        write_chan_llp(chan, first->hw_desc[0].llp | lms);
 383
 384        irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
 385        axi_chan_irq_sig_set(chan, irq_mask);
 386
 387        /* Generate 'suspend' status but don't generate interrupt */
 388        irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
 389        axi_chan_irq_set(chan, irq_mask);
 390
 391        axi_chan_enable(chan);
 392}
 393
 394static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
 395{
 396        struct axi_dma_desc *desc;
 397        struct virt_dma_desc *vd;
 398
 399        vd = vchan_next_desc(&chan->vc);
 400        if (!vd)
 401                return;
 402
 403        desc = vd_to_axi_desc(vd);
 404        dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
 405                vd->tx.cookie);
 406        axi_chan_block_xfer_start(chan, desc);
 407}
 408
 409static void dma_chan_issue_pending(struct dma_chan *dchan)
 410{
 411        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 412        unsigned long flags;
 413
 414        spin_lock_irqsave(&chan->vc.lock, flags);
 415        if (vchan_issue_pending(&chan->vc))
 416                axi_chan_start_first_queued(chan);
 417        spin_unlock_irqrestore(&chan->vc.lock, flags);
 418}
 419
 420static void dw_axi_dma_synchronize(struct dma_chan *dchan)
 421{
 422        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 423
 424        vchan_synchronize(&chan->vc);
 425}
 426
 427static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
 428{
 429        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 430
 431        /* ASSERT: channel is idle */
 432        if (axi_chan_is_hw_enable(chan)) {
 433                dev_err(chan2dev(chan), "%s is non-idle!\n",
 434                        axi_chan_name(chan));
 435                return -EBUSY;
 436        }
 437
 438        /* LLI address must be aligned to a 64-byte boundary */
 439        chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
 440                                          chan->chip->dev,
 441                                          sizeof(struct axi_dma_lli),
 442                                          64, 0);
 443        if (!chan->desc_pool) {
 444                dev_err(chan2dev(chan), "No memory for descriptors\n");
 445                return -ENOMEM;
 446        }
 447        dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
 448
 449        pm_runtime_get(chan->chip->dev);
 450
 451        return 0;
 452}
 453
 454static void dma_chan_free_chan_resources(struct dma_chan *dchan)
 455{
 456        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 457
 458        /* ASSERT: channel is idle */
 459        if (axi_chan_is_hw_enable(chan))
 460                dev_err(dchan2dev(dchan), "%s is non-idle!\n",
 461                        axi_chan_name(chan));
 462
 463        axi_chan_disable(chan);
 464        axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
 465
 466        vchan_free_chan_resources(&chan->vc);
 467
 468        dma_pool_destroy(chan->desc_pool);
 469        chan->desc_pool = NULL;
 470        dev_vdbg(dchan2dev(dchan),
  471                 "%s: free resources, descriptors still allocated: %u\n",
 472                 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
 473
 474        pm_runtime_put(chan->chip->dev);
 475}
 476
 477static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
 478{
 479        struct axi_dma_chip *chip = chan->chip;
 480        unsigned long reg_value, val;
 481
 482        if (!chip->apb_regs) {
 483                dev_err(chip->dev, "apb_regs not initialized\n");
 484                return;
 485        }
 486
 487        /*
 488         * An unused DMA channel has a default value of 0x3F.
  489         * Lock the DMA channel by assigning a handshake number to the channel.
  490         * Unlock the DMA channel by assigning 0x3F to the channel.
 491         */
 492        if (set)
 493                val = chan->hw_handshake_num;
 494        else
 495                val = UNUSED_CHANNEL;
 496
 497        reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
 498
 499        /* Channel is already allocated, set handshake as per channel ID */
  500        /* A single 64-bit write covers the handshake fields of all 8 channels */
 501
 502        reg_value &= ~(DMA_APB_HS_SEL_MASK <<
 503                        (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
 504        reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
 505        lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
 506
 507        return;
 508}
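
/*
 * Layout sketch of DMAC_APB_HW_HS_SEL_0 as used above: the 64-bit register
 * holds one DMA_APB_HS_SEL_BIT_SIZE wide handshake field per channel (field
 * size and mask are defined in dw-axi-dmac.h). Binding channel 2 to
 * handshake 5 therefore boils down to
 *
 *     reg &= ~(DMA_APB_HS_SEL_MASK << (2 * DMA_APB_HS_SEL_BIT_SIZE));
 *     reg |=  (5 << (2 * DMA_APB_HS_SEL_BIT_SIZE));
 *
 * and writing UNUSED_CHANNEL (0x3F) into the same field releases the mapping.
 */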
 509
 510/*
 511 * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
 512 * as 1, it understands that the current block is the final block in the
 513 * transfer and completes the DMA transfer operation at the end of current
 514 * block transfer.
 515 */
 516static void set_desc_last(struct axi_dma_hw_desc *desc)
 517{
 518        u32 val;
 519
 520        val = le32_to_cpu(desc->lli->ctl_hi);
 521        val |= CH_CTL_H_LLI_LAST;
 522        desc->lli->ctl_hi = cpu_to_le32(val);
 523}
 524
 525static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
 526{
 527        desc->lli->sar = cpu_to_le64(adr);
 528}
 529
 530static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
 531{
 532        desc->lli->dar = cpu_to_le64(adr);
 533}
 534
 535static void set_desc_src_master(struct axi_dma_hw_desc *desc)
 536{
 537        u32 val;
 538
 539        /* Select AXI0 for source master */
 540        val = le32_to_cpu(desc->lli->ctl_lo);
 541        val &= ~CH_CTL_L_SRC_MAST;
 542        desc->lli->ctl_lo = cpu_to_le32(val);
 543}
 544
 545static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
 546                                 struct axi_dma_desc *desc)
 547{
 548        u32 val;
 549
  550        /* Select AXI1 for destination master if available */
 551        val = le32_to_cpu(hw_desc->lli->ctl_lo);
 552        if (desc->chan->chip->dw->hdata->nr_masters > 1)
 553                val |= CH_CTL_L_DST_MAST;
 554        else
 555                val &= ~CH_CTL_L_DST_MAST;
 556
 557        hw_desc->lli->ctl_lo = cpu_to_le32(val);
 558}
 559
 560static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
 561                                  struct axi_dma_hw_desc *hw_desc,
 562                                  dma_addr_t mem_addr, size_t len)
 563{
 564        unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
 565        unsigned int reg_width;
 566        unsigned int mem_width;
 567        dma_addr_t device_addr;
 568        size_t axi_block_ts;
 569        size_t block_ts;
 570        u32 ctllo, ctlhi;
 571        u32 burst_len;
 572
 573        axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
 574
 575        mem_width = __ffs(data_width | mem_addr | len);
 576        if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
 577                mem_width = DWAXIDMAC_TRANS_WIDTH_32;
 578
 579        if (!IS_ALIGNED(mem_addr, 4)) {
 580                dev_err(chan->chip->dev, "invalid buffer alignment\n");
 581                return -EINVAL;
 582        }
 583
 584        switch (chan->direction) {
 585        case DMA_MEM_TO_DEV:
 586                reg_width = __ffs(chan->config.dst_addr_width);
 587                device_addr = chan->config.dst_addr;
 588                ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
 589                        mem_width << CH_CTL_L_SRC_WIDTH_POS |
 590                        DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
 591                        DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
 592                block_ts = len >> mem_width;
 593                break;
 594        case DMA_DEV_TO_MEM:
 595                reg_width = __ffs(chan->config.src_addr_width);
 596                device_addr = chan->config.src_addr;
 597                ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
 598                        mem_width << CH_CTL_L_DST_WIDTH_POS |
 599                        DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
 600                        DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
 601                block_ts = len >> reg_width;
 602                break;
 603        default:
 604                return -EINVAL;
 605        }
 606
 607        if (block_ts > axi_block_ts)
 608                return -EINVAL;
 609
 610        hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
 611        if (unlikely(!hw_desc->lli))
 612                return -ENOMEM;
 613
 614        ctlhi = CH_CTL_H_LLI_VALID;
 615
 616        if (chan->chip->dw->hdata->restrict_axi_burst_len) {
 617                burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
 618                ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
 619                         burst_len << CH_CTL_H_ARLEN_POS |
 620                         burst_len << CH_CTL_H_AWLEN_POS;
 621        }
 622
 623        hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);
 624
 625        if (chan->direction == DMA_MEM_TO_DEV) {
 626                write_desc_sar(hw_desc, mem_addr);
 627                write_desc_dar(hw_desc, device_addr);
 628        } else {
 629                write_desc_sar(hw_desc, device_addr);
 630                write_desc_dar(hw_desc, mem_addr);
 631        }
 632
 633        hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
 634
 635        ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
 636                 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
 637        hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
 638
 639        set_desc_src_master(hw_desc);
 640
 641        hw_desc->len = len;
 642        return 0;
 643}
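
/*
 * Worked example for the block_ts limit checked above: with a 32-bit memory
 * access width (mem_width = 2), a 16 KiB segment needs
 *
 *     block_ts = 16384 >> 2 = 4096 transfers
 *
 * which only fits if the per-channel block_size parsed from the firmware
 * properties is at least 4096; block_ts_lo is then programmed with
 * block_ts - 1.
 */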
 644
 645static size_t calculate_block_len(struct axi_dma_chan *chan,
 646                                  dma_addr_t dma_addr, size_t buf_len,
 647                                  enum dma_transfer_direction direction)
 648{
 649        u32 data_width, reg_width, mem_width;
 650        size_t axi_block_ts, block_len;
 651
 652        axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
 653
 654        switch (direction) {
 655        case DMA_MEM_TO_DEV:
 656                data_width = BIT(chan->chip->dw->hdata->m_data_width);
 657                mem_width = __ffs(data_width | dma_addr | buf_len);
 658                if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
 659                        mem_width = DWAXIDMAC_TRANS_WIDTH_32;
 660
 661                block_len = axi_block_ts << mem_width;
 662                break;
 663        case DMA_DEV_TO_MEM:
 664                reg_width = __ffs(chan->config.src_addr_width);
 665                block_len = axi_block_ts << reg_width;
 666                break;
 667        default:
 668                block_len = 0;
 669        }
 670
 671        return block_len;
 672}
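
/*
 * Example of the resulting cap: for DMA_DEV_TO_MEM with a 4-byte register
 * width (src_addr_width = 4, so reg_width = 2) and a per-channel block_size
 * of 1024, one hardware block can carry at most
 *
 *     block_len = 1024 << 2 = 4096 bytes
 *
 * which is the limit the prep_cyclic/prep_slave_sg callbacks use to decide
 * how many segments a buffer has to be split into.
 */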
 673
 674static struct dma_async_tx_descriptor *
 675dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
 676                            size_t buf_len, size_t period_len,
 677                            enum dma_transfer_direction direction,
 678                            unsigned long flags)
 679{
 680        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 681        struct axi_dma_hw_desc *hw_desc = NULL;
 682        struct axi_dma_desc *desc = NULL;
 683        dma_addr_t src_addr = dma_addr;
 684        u32 num_periods, num_segments;
 685        size_t axi_block_len;
 686        u32 total_segments;
 687        u32 segment_len;
 688        unsigned int i;
 689        int status;
 690        u64 llp = 0;
 691        u8 lms = 0; /* Select AXI0 master for LLI fetching */
 692
 693        num_periods = buf_len / period_len;
 694
 695        axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
 696        if (axi_block_len == 0)
 697                return NULL;
 698
 699        num_segments = DIV_ROUND_UP(period_len, axi_block_len);
 700        segment_len = DIV_ROUND_UP(period_len, num_segments);
 701
 702        total_segments = num_periods * num_segments;
 703
 704        desc = axi_desc_alloc(total_segments);
 705        if (unlikely(!desc))
 706                goto err_desc_get;
 707
 708        chan->direction = direction;
 709        desc->chan = chan;
 710        chan->cyclic = true;
 711        desc->length = 0;
 712        desc->period_len = period_len;
 713
 714        for (i = 0; i < total_segments; i++) {
 715                hw_desc = &desc->hw_desc[i];
 716
 717                status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
 718                                                segment_len);
 719                if (status < 0)
 720                        goto err_desc_get;
 721
 722                desc->length += hw_desc->len;
  723                /* Set end-of-link on each descriptor so that the cyclic
  724                 * callback can be triggered from the interrupt handler.
  725                 */
 726                set_desc_last(hw_desc);
 727
 728                src_addr += segment_len;
 729        }
 730
 731        llp = desc->hw_desc[0].llp;
 732
  733        /* Chain the LLIs: each llp points to the next, the last wraps to the first */
 734        do {
 735                hw_desc = &desc->hw_desc[--total_segments];
 736                write_desc_llp(hw_desc, llp | lms);
 737                llp = hw_desc->llp;
 738        } while (total_segments);
 739
 740        dw_axi_dma_set_hw_channel(chan, true);
 741
 742        return vchan_tx_prep(&chan->vc, &desc->vd, flags);
 743
 744err_desc_get:
 745        if (desc)
 746                axi_desc_put(desc);
 747
 748        return NULL;
 749}
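
/*
 * Segmentation example for the cyclic case above: buf_len = 8192,
 * period_len = 2048 and a 4096-byte block limit give
 *
 *     num_periods    = 8192 / 2048              = 4
 *     num_segments   = DIV_ROUND_UP(2048, 4096) = 1
 *     total_segments = 4 * 1                    = 4
 *
 * Four 2048-byte LLIs are built; the linking loop points every LLI at the
 * next one and the last back to the first, so the ring replays until the
 * channel is terminated.
 */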
 750
 751static struct dma_async_tx_descriptor *
 752dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 753                              unsigned int sg_len,
 754                              enum dma_transfer_direction direction,
 755                              unsigned long flags, void *context)
 756{
 757        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 758        struct axi_dma_hw_desc *hw_desc = NULL;
 759        struct axi_dma_desc *desc = NULL;
 760        u32 num_segments, segment_len;
 761        unsigned int loop = 0;
 762        struct scatterlist *sg;
 763        size_t axi_block_len;
 764        u32 len, num_sgs = 0;
 765        unsigned int i;
 766        dma_addr_t mem;
 767        int status;
 768        u64 llp = 0;
 769        u8 lms = 0; /* Select AXI0 master for LLI fetching */
 770
 771        if (unlikely(!is_slave_direction(direction) || !sg_len))
 772                return NULL;
 773
 774        mem = sg_dma_address(sgl);
 775        len = sg_dma_len(sgl);
 776
 777        axi_block_len = calculate_block_len(chan, mem, len, direction);
 778        if (axi_block_len == 0)
 779                return NULL;
 780
 781        for_each_sg(sgl, sg, sg_len, i)
 782                num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
 783
 784        desc = axi_desc_alloc(num_sgs);
 785        if (unlikely(!desc))
 786                goto err_desc_get;
 787
 788        desc->chan = chan;
 789        desc->length = 0;
 790        chan->direction = direction;
 791
 792        for_each_sg(sgl, sg, sg_len, i) {
 793                mem = sg_dma_address(sg);
 794                len = sg_dma_len(sg);
 795                num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
 796                segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
 797
 798                do {
 799                        hw_desc = &desc->hw_desc[loop++];
 800                        status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
 801                        if (status < 0)
 802                                goto err_desc_get;
 803
 804                        desc->length += hw_desc->len;
 805                        len -= segment_len;
 806                        mem += segment_len;
 807                } while (len >= segment_len);
 808        }
 809
 810        /* Set end-of-link to the last link descriptor of list */
 811        set_desc_last(&desc->hw_desc[num_sgs - 1]);
 812
  813        /* Chain the LLIs so that each llp points to the next descriptor */
 814        do {
 815                hw_desc = &desc->hw_desc[--num_sgs];
 816                write_desc_llp(hw_desc, llp | lms);
 817                llp = hw_desc->llp;
 818        } while (num_sgs);
 819
 820        dw_axi_dma_set_hw_channel(chan, true);
 821
 822        return vchan_tx_prep(&chan->vc, &desc->vd, flags);
 823
 824err_desc_get:
 825        if (desc)
 826                axi_desc_put(desc);
 827
 828        return NULL;
 829}
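
/*
 * Example of the per-entry split above: a scatterlist entry of 8192 bytes
 * with a 4096-byte block limit is divided as
 *
 *     num_segments = DIV_ROUND_UP(8192, 4096) = 2
 *     segment_len  = DIV_ROUND_UP(8192, 2)    = 4096
 *
 * yielding two 4096-byte LLIs (the inner loop assumes each entry splits
 * evenly into segment_len sized pieces, which holds for the usual
 * power-of-two peripheral buffers).
 */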
 830
 831static struct dma_async_tx_descriptor *
 832dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 833                         dma_addr_t src_adr, size_t len, unsigned long flags)
 834{
 835        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 836        size_t block_ts, max_block_ts, xfer_len;
 837        struct axi_dma_hw_desc *hw_desc = NULL;
 838        struct axi_dma_desc *desc = NULL;
 839        u32 xfer_width, reg, num;
 840        u64 llp = 0;
 841        u8 lms = 0; /* Select AXI0 master for LLI fetching */
 842
 843        dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
 844                axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
 845
 846        max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
 847        xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
 848        num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
 849        desc = axi_desc_alloc(num);
 850        if (unlikely(!desc))
 851                goto err_desc_get;
 852
 853        desc->chan = chan;
 854        num = 0;
 855        desc->length = 0;
 856        while (len) {
 857                xfer_len = len;
 858
 859                hw_desc = &desc->hw_desc[num];
 860                /*
  861                 * Take care of the alignment.
  862                 * Source and destination widths can be different, but we use the
  863                 * same width for both to keep things simple.
 864                 */
 865                xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
 866
 867                /*
 868                 * block_ts indicates the total number of data of width
 869                 * to be transferred in a DMA block transfer.
 870                 * BLOCK_TS register should be set to block_ts - 1
 871                 */
 872                block_ts = xfer_len >> xfer_width;
 873                if (block_ts > max_block_ts) {
 874                        block_ts = max_block_ts;
 875                        xfer_len = max_block_ts << xfer_width;
 876                }
 877
 878                hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
 879                if (unlikely(!hw_desc->lli))
 880                        goto err_desc_get;
 881
 882                write_desc_sar(hw_desc, src_adr);
 883                write_desc_dar(hw_desc, dst_adr);
 884                hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
 885
 886                reg = CH_CTL_H_LLI_VALID;
 887                if (chan->chip->dw->hdata->restrict_axi_burst_len) {
 888                        u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
 889
 890                        reg |= (CH_CTL_H_ARLEN_EN |
 891                                burst_len << CH_CTL_H_ARLEN_POS |
 892                                CH_CTL_H_AWLEN_EN |
 893                                burst_len << CH_CTL_H_AWLEN_POS);
 894                }
 895                hw_desc->lli->ctl_hi = cpu_to_le32(reg);
 896
 897                reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
 898                       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
 899                       xfer_width << CH_CTL_L_DST_WIDTH_POS |
 900                       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
 901                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
 902                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
 903                hw_desc->lli->ctl_lo = cpu_to_le32(reg);
 904
 905                set_desc_src_master(hw_desc);
 906                set_desc_dest_master(hw_desc, desc);
 907
 908                hw_desc->len = xfer_len;
 909                desc->length += hw_desc->len;
 910                /* update the length and addresses for the next loop cycle */
 911                len -= xfer_len;
 912                dst_adr += xfer_len;
 913                src_adr += xfer_len;
 914                num++;
 915        }
 916
 917        /* Set end-of-link to the last link descriptor of list */
 918        set_desc_last(&desc->hw_desc[num - 1]);
  919        /* Chain the LLIs so that each llp points to the next descriptor */
 920        do {
 921                hw_desc = &desc->hw_desc[--num];
 922                write_desc_llp(hw_desc, llp | lms);
 923                llp = hw_desc->llp;
 924        } while (num);
 925
 926        return vchan_tx_prep(&chan->vc, &desc->vd, flags);
 927
 928err_desc_get:
 929        if (desc)
 930                axi_desc_put(desc);
 931        return NULL;
 932}
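
/*
 * Chunking example for the memcpy case above: len = 1 MiB with
 * xfer_width = 2 (4-byte aligned addresses) and max_block_ts = 4096 gives a
 * per-LLI limit of
 *
 *     4096 << 2 = 16384 bytes
 *
 * so the copy is described by 64 chained LLIs; only the last one carries
 * CH_CTL_H_LLI_LAST, so DMA_TRF is raised once when the whole chain is done.
 */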
 933
 934static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
 935                                        struct dma_slave_config *config)
 936{
 937        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 938
 939        memcpy(&chan->config, config, sizeof(*config));
 940
 941        return 0;
 942}
 943
 944static void axi_chan_dump_lli(struct axi_dma_chan *chan,
 945                              struct axi_dma_hw_desc *desc)
 946{
 947        dev_err(dchan2dev(&chan->vc.chan),
 948                "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
 949                le64_to_cpu(desc->lli->sar),
 950                le64_to_cpu(desc->lli->dar),
 951                le64_to_cpu(desc->lli->llp),
 952                le32_to_cpu(desc->lli->block_ts_lo),
 953                le32_to_cpu(desc->lli->ctl_hi),
 954                le32_to_cpu(desc->lli->ctl_lo));
 955}
 956
 957static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
 958                                   struct axi_dma_desc *desc_head)
 959{
 960        int count = atomic_read(&chan->descs_allocated);
 961        int i;
 962
 963        for (i = 0; i < count; i++)
 964                axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
 965}
 966
 967static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 968{
 969        struct virt_dma_desc *vd;
 970        unsigned long flags;
 971
 972        spin_lock_irqsave(&chan->vc.lock, flags);
 973
 974        axi_chan_disable(chan);
 975
  976        /* The bad descriptor is currently at the head of the vc list */
  977        vd = vchan_next_desc(&chan->vc);
  978        /* Remove the faulting descriptor from the issued list */
 979        list_del(&vd->node);
 980
 981        /* WARN about bad descriptor */
 982        dev_err(chan2dev(chan),
 983                "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
 984                axi_chan_name(chan), vd->tx.cookie, status);
 985        axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
 986
 987        vchan_cookie_complete(vd);
 988
 989        /* Try to restart the controller */
 990        axi_chan_start_first_queued(chan);
 991
 992        spin_unlock_irqrestore(&chan->vc.lock, flags);
 993}
 994
 995static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
 996{
 997        int count = atomic_read(&chan->descs_allocated);
 998        struct axi_dma_hw_desc *hw_desc;
 999        struct axi_dma_desc *desc;
1000        struct virt_dma_desc *vd;
1001        unsigned long flags;
1002        u64 llp;
1003        int i;
1004
1005        spin_lock_irqsave(&chan->vc.lock, flags);
1006        if (unlikely(axi_chan_is_hw_enable(chan))) {
1007                dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
1008                        axi_chan_name(chan));
1009                axi_chan_disable(chan);
1010        }
1011
 1012        /* The completed descriptor is currently at the head of the vc list */
1013        vd = vchan_next_desc(&chan->vc);
1014
1015        if (chan->cyclic) {
1016                desc = vd_to_axi_desc(vd);
1017                if (desc) {
1018                        llp = lo_hi_readq(chan->chan_regs + CH_LLP);
1019                        for (i = 0; i < count; i++) {
1020                                hw_desc = &desc->hw_desc[i];
1021                                if (hw_desc->llp == llp) {
1022                                        axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
1023                                        hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
1024                                        desc->completed_blocks = i;
1025
1026                                        if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
1027                                                vchan_cyclic_callback(vd);
1028                                        break;
1029                                }
1030                        }
1031
1032                        axi_chan_enable(chan);
1033                }
1034        } else {
 1035                /* Remove the completed descriptor from the issued list before completing */
1036                list_del(&vd->node);
1037                vchan_cookie_complete(vd);
1038
1039                /* Submit queued descriptors after processing the completed ones */
1040                axi_chan_start_first_queued(chan);
1041        }
1042
1043        spin_unlock_irqrestore(&chan->vc.lock, flags);
1044}
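
/*
 * Cyclic completion sketch: the handler reads the channel's current CH_LLP
 * and walks the ring until it finds the LLI with that address, which marks
 * how far the hardware has progressed. That index becomes completed_blocks
 * for residue reporting, the LLI is marked valid again so it can be reused
 * on the next lap, and the period callback fires whenever a whole
 * period_len worth of blocks has completed. Because every cyclic LLI has
 * LLI_LAST set, the channel stops after each block and is re-enabled here.
 */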
1045
1046static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
1047{
1048        struct axi_dma_chip *chip = dev_id;
1049        struct dw_axi_dma *dw = chip->dw;
1050        struct axi_dma_chan *chan;
1051
1052        u32 status, i;
1053
 1054        /* Disable DMAC interrupts. We'll re-enable them after processing the channels */
1055        axi_dma_irq_disable(chip);
1056
 1057        /* Poll, clear and process every channel's interrupt status */
1058        for (i = 0; i < dw->hdata->nr_channels; i++) {
1059                chan = &dw->chan[i];
1060                status = axi_chan_irq_read(chan);
1061                axi_chan_irq_clear(chan, status);
1062
1063                dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
1064                        axi_chan_name(chan), i, status);
1065
1066                if (status & DWAXIDMAC_IRQ_ALL_ERR)
1067                        axi_chan_handle_err(chan, status);
1068                else if (status & DWAXIDMAC_IRQ_DMA_TRF)
1069                        axi_chan_block_xfer_complete(chan);
1070        }
1071
1072        /* Re-enable interrupts */
1073        axi_dma_irq_enable(chip);
1074
1075        return IRQ_HANDLED;
1076}
1077
1078static int dma_chan_terminate_all(struct dma_chan *dchan)
1079{
1080        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1081        u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
1082        unsigned long flags;
1083        u32 val;
1084        int ret;
1085        LIST_HEAD(head);
1086
1087        axi_chan_disable(chan);
1088
1089        ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
1090                                        !(val & chan_active), 1000, 10000);
1091        if (ret == -ETIMEDOUT)
1092                dev_warn(dchan2dev(dchan),
1093                         "%s failed to stop\n", axi_chan_name(chan));
1094
1095        if (chan->direction != DMA_MEM_TO_MEM)
1096                dw_axi_dma_set_hw_channel(chan, false);
1097        if (chan->direction == DMA_MEM_TO_DEV)
1098                dw_axi_dma_set_byte_halfword(chan, false);
1099
1100        spin_lock_irqsave(&chan->vc.lock, flags);
1101
1102        vchan_get_all_descriptors(&chan->vc, &head);
1103
1104        chan->cyclic = false;
1105        spin_unlock_irqrestore(&chan->vc.lock, flags);
1106
1107        vchan_dma_desc_free_list(&chan->vc, &head);
1108
1109        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
1110
1111        return 0;
1112}
1113
1114static int dma_chan_pause(struct dma_chan *dchan)
1115{
1116        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1117        unsigned long flags;
1118        unsigned int timeout = 20; /* timeout iterations */
1119        u32 val;
1120
1121        spin_lock_irqsave(&chan->vc.lock, flags);
1122
1123        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1124        val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
1125               BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
1126        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1127
1128        do  {
1129                if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
1130                        break;
1131
1132                udelay(2);
1133        } while (--timeout);
1134
1135        axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
1136
1137        chan->is_paused = true;
1138
1139        spin_unlock_irqrestore(&chan->vc.lock, flags);
1140
1141        return timeout ? 0 : -EAGAIN;
1142}
1143
1144/* Called in chan locked context */
1145static inline void axi_chan_resume(struct axi_dma_chan *chan)
1146{
1147        u32 val;
1148
1149        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1150        val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
1151        val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
1152        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1153
1154        chan->is_paused = false;
1155}
1156
1157static int dma_chan_resume(struct dma_chan *dchan)
1158{
1159        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1160        unsigned long flags;
1161
1162        spin_lock_irqsave(&chan->vc.lock, flags);
1163
1164        if (chan->is_paused)
1165                axi_chan_resume(chan);
1166
1167        spin_unlock_irqrestore(&chan->vc.lock, flags);
1168
1169        return 0;
1170}
1171
1172static int axi_dma_suspend(struct axi_dma_chip *chip)
1173{
1174        axi_dma_irq_disable(chip);
1175        axi_dma_disable(chip);
1176
1177        clk_disable_unprepare(chip->core_clk);
1178        clk_disable_unprepare(chip->cfgr_clk);
1179
1180        return 0;
1181}
1182
1183static int axi_dma_resume(struct axi_dma_chip *chip)
1184{
1185        int ret;
1186
1187        ret = clk_prepare_enable(chip->cfgr_clk);
1188        if (ret < 0)
1189                return ret;
1190
1191        ret = clk_prepare_enable(chip->core_clk);
1192        if (ret < 0)
1193                return ret;
1194
1195        axi_dma_enable(chip);
1196        axi_dma_irq_enable(chip);
1197
1198        return 0;
1199}
1200
1201static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
1202{
1203        struct axi_dma_chip *chip = dev_get_drvdata(dev);
1204
1205        return axi_dma_suspend(chip);
1206}
1207
1208static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
1209{
1210        struct axi_dma_chip *chip = dev_get_drvdata(dev);
1211
1212        return axi_dma_resume(chip);
1213}
1214
1215static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
1216                                            struct of_dma *ofdma)
1217{
1218        struct dw_axi_dma *dw = ofdma->of_dma_data;
1219        struct axi_dma_chan *chan;
1220        struct dma_chan *dchan;
1221
1222        dchan = dma_get_any_slave_channel(&dw->dma);
1223        if (!dchan)
1224                return NULL;
1225
1226        chan = dchan_to_axi_dma_chan(dchan);
1227        chan->hw_handshake_num = dma_spec->args[0];
1228        return dchan;
1229}
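
/*
 * Consumer binding sketch (node names are illustrative): the single cell in
 * a "dmas" specifier ends up in hw_handshake_num for the channel returned
 * here, e.g.
 *
 *     uart0 {
 *             dmas = <&axi_dma 4>, <&axi_dma 5>;
 *             dma-names = "tx", "rx";
 *     };
 *
 * where 4 and 5 are the hardware handshake interfaces wired to the
 * peripheral; any free channel may be handed out for either request.
 */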
1230
1231static int parse_device_properties(struct axi_dma_chip *chip)
1232{
1233        struct device *dev = chip->dev;
1234        u32 tmp, carr[DMAC_MAX_CHANNELS];
1235        int ret;
1236
1237        ret = device_property_read_u32(dev, "dma-channels", &tmp);
1238        if (ret)
1239                return ret;
1240        if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
1241                return -EINVAL;
1242
1243        chip->dw->hdata->nr_channels = tmp;
1244
1245        ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
1246        if (ret)
1247                return ret;
1248        if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
1249                return -EINVAL;
1250
1251        chip->dw->hdata->nr_masters = tmp;
1252
1253        ret = device_property_read_u32(dev, "snps,data-width", &tmp);
1254        if (ret)
1255                return ret;
1256        if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
1257                return -EINVAL;
1258
1259        chip->dw->hdata->m_data_width = tmp;
1260
1261        ret = device_property_read_u32_array(dev, "snps,block-size", carr,
1262                                             chip->dw->hdata->nr_channels);
1263        if (ret)
1264                return ret;
1265        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
1266                if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
1267                        return -EINVAL;
1268
1269                chip->dw->hdata->block_size[tmp] = carr[tmp];
1270        }
1271
1272        ret = device_property_read_u32_array(dev, "snps,priority", carr,
1273                                             chip->dw->hdata->nr_channels);
1274        if (ret)
1275                return ret;
1276        /* Priority value must be programmed within [0:nr_channels-1] range */
1277        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
1278                if (carr[tmp] >= chip->dw->hdata->nr_channels)
1279                        return -EINVAL;
1280
1281                chip->dw->hdata->priority[tmp] = carr[tmp];
1282        }
1283
 1284        /* axi-max-burst-len is an optional property */
1285        ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
1286        if (!ret) {
1287                if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
1288                        return -EINVAL;
1289                if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
1290                        return -EINVAL;
1291
1292                chip->dw->hdata->restrict_axi_burst_len = true;
1293                chip->dw->hdata->axi_rw_burst_len = tmp;
1294        }
1295
1296        return 0;
1297}
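
/*
 * Property set consumed above, shown for an illustrative two-channel
 * configuration (values are examples, not requirements):
 *
 *     dma-channels = <2>;
 *     snps,dma-masters = <1>;
 *     snps,data-width = <3>;          - 2^3 bytes, i.e. a 64-bit bus
 *     snps,block-size = <1024 1024>;  - one entry per channel
 *     snps,priority = <0 1>;          - each value < dma-channels
 *     snps,axi-max-burst-len = <16>;  - optional, checked against IP limits
 *
 * The two array properties need at least dma-channels entries, otherwise
 * the probe fails.
 */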
1298
1299static int dw_probe(struct platform_device *pdev)
1300{
1301        struct device_node *node = pdev->dev.of_node;
1302        struct axi_dma_chip *chip;
1303        struct resource *mem;
1304        struct dw_axi_dma *dw;
1305        struct dw_axi_dma_hcfg *hdata;
1306        u32 i;
1307        int ret;
1308
1309        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
1310        if (!chip)
1311                return -ENOMEM;
1312
1313        dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
1314        if (!dw)
1315                return -ENOMEM;
1316
1317        hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
1318        if (!hdata)
1319                return -ENOMEM;
1320
1321        chip->dw = dw;
1322        chip->dev = &pdev->dev;
1323        chip->dw->hdata = hdata;
1324
1325        chip->irq = platform_get_irq(pdev, 0);
1326        if (chip->irq < 0)
1327                return chip->irq;
1328
1329        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1330        chip->regs = devm_ioremap_resource(chip->dev, mem);
1331        if (IS_ERR(chip->regs))
1332                return PTR_ERR(chip->regs);
1333
1334        if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
1335                chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
1336                if (IS_ERR(chip->apb_regs))
1337                        return PTR_ERR(chip->apb_regs);
1338        }
1339
1340        chip->core_clk = devm_clk_get(chip->dev, "core-clk");
1341        if (IS_ERR(chip->core_clk))
1342                return PTR_ERR(chip->core_clk);
1343
1344        chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
1345        if (IS_ERR(chip->cfgr_clk))
1346                return PTR_ERR(chip->cfgr_clk);
1347
1348        ret = parse_device_properties(chip);
1349        if (ret)
1350                return ret;
1351
1352        dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
1353                                sizeof(*dw->chan), GFP_KERNEL);
1354        if (!dw->chan)
1355                return -ENOMEM;
1356
1357        ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
1358                               IRQF_SHARED, KBUILD_MODNAME, chip);
1359        if (ret)
1360                return ret;
1361
1362        INIT_LIST_HEAD(&dw->dma.channels);
1363        for (i = 0; i < hdata->nr_channels; i++) {
1364                struct axi_dma_chan *chan = &dw->chan[i];
1365
1366                chan->chip = chip;
1367                chan->id = i;
1368                chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
1369                atomic_set(&chan->descs_allocated, 0);
1370
1371                chan->vc.desc_free = vchan_desc_put;
1372                vchan_init(&chan->vc, &dw->dma);
1373        }
1374
1375        /* Set capabilities */
1376        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1377        dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1378        dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
1379
1380        /* DMA capabilities */
1381        dw->dma.chancnt = hdata->nr_channels;
1382        dw->dma.max_burst = hdata->axi_rw_burst_len;
1383        dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
1384        dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
1385        dw->dma.directions = BIT(DMA_MEM_TO_MEM);
1386        dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1387        dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1388
1389        dw->dma.dev = chip->dev;
1390        dw->dma.device_tx_status = dma_chan_tx_status;
1391        dw->dma.device_issue_pending = dma_chan_issue_pending;
1392        dw->dma.device_terminate_all = dma_chan_terminate_all;
1393        dw->dma.device_pause = dma_chan_pause;
1394        dw->dma.device_resume = dma_chan_resume;
1395
1396        dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
1397        dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
1398
1399        dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
1400        dw->dma.device_synchronize = dw_axi_dma_synchronize;
1401        dw->dma.device_config = dw_axi_dma_chan_slave_config;
1402        dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
1403        dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
1404
1405        /*
 1406         * The Synopsys DesignWare AxiDMA datasheet states that the maximum
 1407         * supported block size is 1024 data items. The device register width
 1408         * is 4 bytes. Therefore, set the constraint to 1024 * 4.
1409         */
1410        dw->dma.dev->dma_parms = &dw->dma_parms;
1411        dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
1412        platform_set_drvdata(pdev, chip);
1413
1414        pm_runtime_enable(chip->dev);
1415
1416        /*
1417         * We can't just call pm_runtime_get here instead of
 1418         * pm_runtime_get_noresume + axi_dma_resume because the driver
 1419         * must also work without Runtime PM.
1420         */
1421        pm_runtime_get_noresume(chip->dev);
1422        ret = axi_dma_resume(chip);
1423        if (ret < 0)
1424                goto err_pm_disable;
1425
1426        axi_dma_hw_init(chip);
1427
1428        pm_runtime_put(chip->dev);
1429
1430        ret = dmaenginem_async_device_register(&dw->dma);
1431        if (ret)
1432                goto err_pm_disable;
1433
1434        /* Register with OF helpers for DMA lookups */
1435        ret = of_dma_controller_register(pdev->dev.of_node,
1436                                         dw_axi_dma_of_xlate, dw);
1437        if (ret < 0)
1438                dev_warn(&pdev->dev,
1439                         "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
1440
1441        dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
1442                 dw->hdata->nr_channels);
1443
1444        return 0;
1445
1446err_pm_disable:
1447        pm_runtime_disable(chip->dev);
1448
1449        return ret;
1450}
1451
1452static int dw_remove(struct platform_device *pdev)
1453{
1454        struct axi_dma_chip *chip = platform_get_drvdata(pdev);
1455        struct dw_axi_dma *dw = chip->dw;
1456        struct axi_dma_chan *chan, *_chan;
1457        u32 i;
1458
 1459        /* Enable the clocks before accessing the registers */
1460        clk_prepare_enable(chip->cfgr_clk);
1461        clk_prepare_enable(chip->core_clk);
1462        axi_dma_irq_disable(chip);
1463        for (i = 0; i < dw->hdata->nr_channels; i++) {
1464                axi_chan_disable(&chip->dw->chan[i]);
1465                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
1466        }
1467        axi_dma_disable(chip);
1468
1469        pm_runtime_disable(chip->dev);
1470        axi_dma_suspend(chip);
1471
1472        devm_free_irq(chip->dev, chip->irq, chip);
1473
1474        of_dma_controller_free(chip->dev->of_node);
1475
1476        list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
1477                        vc.chan.device_node) {
1478                list_del(&chan->vc.chan.device_node);
1479                tasklet_kill(&chan->vc.task);
1480        }
1481
1482        return 0;
1483}
1484
1485static const struct dev_pm_ops dw_axi_dma_pm_ops = {
1486        SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
1487};
1488
1489static const struct of_device_id dw_dma_of_id_table[] = {
1490        { .compatible = "snps,axi-dma-1.01a" },
1491        { .compatible = "intel,kmb-axi-dma" },
1492        {}
1493};
1494MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
1495
1496static struct platform_driver dw_driver = {
1497        .probe          = dw_probe,
1498        .remove         = dw_remove,
1499        .driver = {
1500                .name   = KBUILD_MODNAME,
1501                .of_match_table = dw_dma_of_id_table,
1502                .pm = &dw_axi_dma_pm_ops,
1503        },
1504};
1505module_platform_driver(dw_driver);
1506
1507MODULE_LICENSE("GPL v2");
1508MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
1509MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
1510