linux/drivers/dma/sprd-dma.c
/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET         0x1000
#define SPRD_DMA_CHN_REG_LENGTH         0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE        64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE              0x0
#define SPRD_DMA_GLB_FRAG_WAIT          0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN       0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN       0xc
#define SPRD_DMA_GLB_INT_RAW_STS        0x10
#define SPRD_DMA_GLB_INT_MSK_STS        0x14
#define SPRD_DMA_GLB_REQ_STS            0x18
#define SPRD_DMA_GLB_CHN_EN_STS         0x1c
#define SPRD_DMA_GLB_DEBUG_STS          0x20
#define SPRD_DMA_GLB_ARB_SEL_STS        0x24
#define SPRD_DMA_GLB_REQ_UID(uid)       (0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET     0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE              0x0
#define SPRD_DMA_CHN_REQ                0x4
#define SPRD_DMA_CHN_CFG                0x8
#define SPRD_DMA_CHN_INTC               0xc
#define SPRD_DMA_CHN_SRC_ADDR           0x10
#define SPRD_DMA_CHN_DES_ADDR           0x14
#define SPRD_DMA_CHN_FRG_LEN            0x18
#define SPRD_DMA_CHN_BLK_LEN            0x1c
#define SPRD_DMA_CHN_TRSC_LEN           0x20
#define SPRD_DMA_CHN_TRSF_STEP          0x24
#define SPRD_DMA_CHN_WARP_PTR           0x28
#define SPRD_DMA_CHN_WARP_TO            0x2c
#define SPRD_DMA_CHN_LLIST_PTR          0x30
#define SPRD_DMA_CHN_FRAG_STEP          0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP       0x38
#define SPRD_DMA_CHN_DES_BLK_STEP       0x3c

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK               GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET         24
#define SPRD_DMA_FRAG_INT_EN            BIT(0)
#define SPRD_DMA_BLK_INT_EN             BIT(1)
#define SPRD_DMA_TRANS_INT_EN           BIT(2)
#define SPRD_DMA_LIST_INT_EN            BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN         BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN                 BIT(0)
#define SPRD_DMA_WAIT_BDONE_OFFSET      24
#define SPRD_DMA_DONOT_WAIT_BDONE       1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN                 BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN               BIT(0)
#define SPRD_DMA_PAUSE_STS              BIT(2)
#define SPRD_DMA_PAUSE_CNT              0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK         GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK          GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET       4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS           BIT(16)
#define SPRD_DMA_BLK_INT_STS            BIT(17)
#define SPRD_DMA_TRSC_INT_STS           BIT(18)
#define SPRD_DMA_LIST_INT_STS           BIT(19)
#define SPRD_DMA_CFGERR_INT_STS         BIT(20)
#define SPRD_DMA_CHN_INT_STS                                    \
        (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |         \
         SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |        \
         SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET   30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET   28
#define SPRD_DMA_SWT_MODE_OFFSET        26
#define SPRD_DMA_REQ_MODE_OFFSET        24
#define SPRD_DMA_REQ_MODE_MASK          GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET         21
#define SPRD_DMA_FIX_EN_OFFSET          20
#define SPRD_DMA_LLIST_END_OFFSET       19
#define SPRD_DMA_FRG_LEN_MASK           GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK           GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK          GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET  16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET   0
#define SPRD_DMA_TRSF_STEP_MASK         GENMASK(15, 0)

#define SPRD_DMA_SOFTWARE_UID           0

/*
 * enum sprd_dma_req_mode: define the DMA request mode
 * @SPRD_DMA_FRAG_REQ: fragment request mode
 * @SPRD_DMA_BLK_REQ: block request mode
 * @SPRD_DMA_TRANS_REQ: transaction request mode
 * @SPRD_DMA_LIST_REQ: link-list request mode
 *
 * There are four request modes: fragment mode, block mode, transaction mode
 * and link-list mode. One transaction can contain several blocks, and one
 * block can contain several fragments. Link-list mode means several DMA
 * configurations can be stored in a reserved memory region, from which the
 * DMA engine fetches each configuration automatically to start a transfer.
 */
enum sprd_dma_req_mode {
        SPRD_DMA_FRAG_REQ,
        SPRD_DMA_BLK_REQ,
        SPRD_DMA_TRANS_REQ,
        SPRD_DMA_LIST_REQ,
};
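
/*
 * For illustration only (the sizes below are hypothetical, not taken from a
 * hardware manual): with a fragment length of 64 bytes and a block length of
 * 512 bytes, a 4096-byte transaction decomposes into 8 blocks of 8 fragments
 * each. SPRD_DMA_FRAG_REQ would then raise a hardware request per 64 bytes,
 * SPRD_DMA_BLK_REQ per 512 bytes, and SPRD_DMA_TRANS_REQ once for the whole
 * 4096 bytes.
 */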

/*
 * enum sprd_dma_int_type: define the DMA interrupt type
 * @SPRD_DMA_NO_INT: do not generate any DMA interrupts.
 * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
 * is done.
 * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
 * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
 * or one block request is done.
 * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
 * request is done.
 * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
 * transaction request or fragment request is done.
 * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
 * transaction request or block request is done.
 * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
 * is done.
 * @SPRD_DMA_CFGERR_INT: configure error interrupt when the configuration
 * is incorrect.
 */
enum sprd_dma_int_type {
        SPRD_DMA_NO_INT,
        SPRD_DMA_FRAG_INT,
        SPRD_DMA_BLK_INT,
        SPRD_DMA_BLK_FRAG_INT,
        SPRD_DMA_TRANS_INT,
        SPRD_DMA_TRANS_FRAG_INT,
        SPRD_DMA_TRANS_BLK_INT,
        SPRD_DMA_LIST_INT,
        SPRD_DMA_CFGERR_INT,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
        u32 pause;
        u32 req;
        u32 cfg;
        u32 intc;
        u32 src_addr;
        u32 des_addr;
        u32 frg_len;
        u32 blk_len;
        u32 trsc_len;
        u32 trsf_step;
        u32 wrap_ptr;
        u32 wrap_to;
        u32 llist_ptr;
        u32 frg_step;
        u32 src_blk_step;
        u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
        struct virt_dma_desc    vd;
        struct sprd_dma_chn_hw  chn_hw;
};

/* dma channel description */
struct sprd_dma_chn {
        struct virt_dma_chan    vc;
        void __iomem            *chn_base;
        u32                     chn_num;
        u32                     dev_id;
        struct sprd_dma_desc    *cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
        struct dma_device       dma_dev;
        void __iomem            *glb_base;
        struct clk              *clk;
        struct clk              *ashb_clk;
        int                     irq;
        u32                     total_chns;
        struct sprd_dma_chn     channels[];
};

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
        .filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

        return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
                                u32 mask, u32 val)
{
        u32 orig = readl(schan->chn_base + reg);
        u32 tmp;

        tmp = (orig & ~mask) | val;
        writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
        int ret;

        ret = clk_prepare_enable(sdev->clk);
        if (ret)
                return ret;

        /*
         * The ashb_clk is optional and only used by the AGCP DMA controller,
         * so check whether it was actually provided before enabling it. Roll
         * back the main clock if enabling the optional clock fails.
         */
        if (!IS_ERR(sdev->ashb_clk)) {
                ret = clk_prepare_enable(sdev->ashb_clk);
                if (ret)
                        clk_disable_unprepare(sdev->clk);
        }

        return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
        clk_disable_unprepare(sdev->clk);

        /* Only disable the optional ashb_clk (AGCP DMA) if it was provided. */
        if (!IS_ERR(sdev->ashb_clk))
                clk_disable_unprepare(sdev->ashb_clk);
}

static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 dev_id = schan->dev_id;

        if (dev_id != SPRD_DMA_SOFTWARE_UID) {
                u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
                                 SPRD_DMA_GLB_REQ_UID(dev_id);

                writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
        }
}
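
/*
 * A worked example of the UID routing above, assuming a hypothetical dev_id
 * of 3 on channel index 5: the write lands at glb_base +
 * SPRD_DMA_GLB_REQ_UID_OFFSET + 0x4 * (3 - 1) = glb_base + 0x2008, and the
 * value written is 5 + 1 = 6, i.e. hardware request line 3 drives channel 6
 * in the controller's 1-based channel numbering. UID 0
 * (SPRD_DMA_SOFTWARE_UID) is reserved for software-triggered transfers and
 * is never routed.
 */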

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 dev_id = schan->dev_id;

        if (dev_id != SPRD_DMA_SOFTWARE_UID) {
                u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
                                 SPRD_DMA_GLB_REQ_UID(dev_id);

                writel(0, sdev->glb_base + uid_offset);
        }
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
                            SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
                            SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
                            SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
                            SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

        if (enable) {
                sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
                                    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

                do {
                        pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
                        if (pause & SPRD_DMA_PAUSE_STS)
                                break;

                        cpu_relax();
                } while (--timeout > 0);

                if (!timeout)
                        dev_warn(sdev->dma_dev.dev,
                                 "pause dma controller timeout\n");
        } else {
                sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
                                    SPRD_DMA_PAUSE_EN, 0);
        }
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
        u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

        if (!(cfg & SPRD_DMA_CHN_EN))
                return;

        sprd_dma_pause_resume(schan, true);
        sprd_dma_disable_chn(schan);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
        unsigned long addr, addr_high;

        addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
        addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
                    SPRD_DMA_HIGH_ADDR_MASK;

        return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}
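
/*
 * A sketch of the 36-bit address split used above and in sprd_dma_config(),
 * with a hypothetical address for illustration: for dest = 0x323456780,
 * bits [31:0] (0x23456780) are held in SPRD_DMA_CHN_DES_ADDR, while bits
 * [35:32] are kept in bits [31:28] of SPRD_DMA_CHN_WARP_TO, i.e.
 * (0x323456780 >> 4) & GENMASK(31, 28) = 0x30000000. Reading back,
 * 0x23456780 | (0x30000000 << 4) recovers the original 36-bit address.
 */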

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
                       SPRD_DMA_CHN_INT_STS;

        switch (intc_sts) {
        case SPRD_DMA_CFGERR_INT_STS:
                return SPRD_DMA_CFGERR_INT;

        case SPRD_DMA_LIST_INT_STS:
                return SPRD_DMA_LIST_INT;

        case SPRD_DMA_TRSC_INT_STS:
                return SPRD_DMA_TRANS_INT;

        case SPRD_DMA_BLK_INT_STS:
                return SPRD_DMA_BLK_INT;

        case SPRD_DMA_FRAG_INT_STS:
                return SPRD_DMA_FRAG_INT;

        default:
                dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
                return SPRD_DMA_NO_INT;
        }
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
        u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

        return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
                                    struct sprd_dma_desc *sdesc)
{
        struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

        writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
        writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
        writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
        writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
        writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
        writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
        writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
        writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
        writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
        writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
        writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
        writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
        writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
        writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
        writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
        writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
        struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

        if (!vd)
                return;

        list_del(&vd->node);
        schan->cur_desc = to_sprd_dma_desc(vd);

        /*
         * Copy the DMA configuration from the DMA descriptor to this hardware
         * channel.
         */
        sprd_dma_set_chn_config(schan, schan->cur_desc);
        sprd_dma_set_uid(schan);
        sprd_dma_enable_chn(schan);

        if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
                sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
        sprd_dma_stop_and_disable(schan);
        sprd_dma_unset_uid(schan);
        sprd_dma_clear_int(schan);
}

static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
                                      enum sprd_dma_int_type int_type,
                                      enum sprd_dma_req_mode req_mode)
{
        if (int_type == SPRD_DMA_NO_INT)
                return false;

        /*
         * An interrupt at the same or coarser granularity than the request
         * mode means the request has completed; the enum values are ordered
         * so that this reduces to a simple comparison.
         */
        return int_type >= req_mode + 1;
}
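
/*
 * The comparison above relies on the declaration order of the two enums:
 * for every request mode, the matching "done" interrupt and all
 * coarser-grained interrupts have strictly larger enum values. A minimal
 * sketch of compile-time checks pinning down that invariant:
 *
 *	BUILD_BUG_ON(SPRD_DMA_FRAG_INT != SPRD_DMA_FRAG_REQ + 1);
 *	BUILD_BUG_ON(SPRD_DMA_BLK_INT != SPRD_DMA_BLK_REQ + 1);
 *	BUILD_BUG_ON(SPRD_DMA_TRANS_INT != SPRD_DMA_TRANS_REQ + 2);
 *	BUILD_BUG_ON(SPRD_DMA_LIST_INT != SPRD_DMA_LIST_REQ + 4);
 */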

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
        struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
        u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
        struct sprd_dma_chn *schan;
        struct sprd_dma_desc *sdesc;
        enum sprd_dma_req_mode req_type;
        enum sprd_dma_int_type int_type;
        bool trans_done = false;
        u32 i;

        /* Handle each pending channel, clearing the lowest set bit in turn. */
        while (irq_status) {
                i = __ffs(irq_status);
                irq_status &= (irq_status - 1);
                schan = &sdev->channels[i];

                spin_lock(&schan->vc.lock);
                int_type = sprd_dma_get_int_type(schan);
                req_type = sprd_dma_get_req_type(schan);
                sprd_dma_clear_int(schan);

                sdesc = schan->cur_desc;

                /* Check if the dma request descriptor is done. */
                trans_done = sprd_dma_check_trans_done(sdesc, int_type,
                                                       req_type);
                if (trans_done) {
                        vchan_cookie_complete(&sdesc->vd);
                        schan->cur_desc = NULL;
                        sprd_dma_start(schan);
                }
                spin_unlock(&schan->vc.lock);
        }

        return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        int ret;

        ret = pm_runtime_get_sync(chan->device->dev);
        if (ret < 0) {
                /* get_sync() bumps the usage count even on failure. */
                pm_runtime_put_noidle(chan->device->dev);
                return ret;
        }

        schan->dev_id = SPRD_DMA_SOFTWARE_UID;
        return 0;
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_stop(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        vchan_free_chan_resources(&schan->vc);
        pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        u32 pos;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&schan->vc.lock, flags);
        vd = vchan_find_desc(&schan->vc, cookie);
        if (vd) {
                struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
                struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

                if (hw->trsc_len > 0)
                        pos = hw->trsc_len;
                else if (hw->blk_len > 0)
                        pos = hw->blk_len;
                else if (hw->frg_len > 0)
                        pos = hw->frg_len;
                else
                        pos = 0;
        } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
                pos = sprd_dma_get_dst_addr(schan);
        } else {
                pos = 0;
        }
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        dma_set_residue(txstate, pos);
        return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
                sprd_dma_start(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
}

static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
                           dma_addr_t dest, dma_addr_t src, size_t len)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
        struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
        u32 datawidth, src_step, des_step, fragment_len;
        u32 block_len, req_mode, irq_mode, transaction_len;
        u32 fix_mode = 0, fix_en = 0;

        /*
         * Pick the widest data width the transfer length allows. Note that
         * only the length is checked here; src and dest are assumed to share
         * its alignment.
         */
        if (IS_ALIGNED(len, 4)) {
                datawidth = 2;
                src_step = 4;
                des_step = 4;
        } else if (IS_ALIGNED(len, 2)) {
                datawidth = 1;
                src_step = 2;
                des_step = 2;
        } else {
                datawidth = 0;
                src_step = 1;
                des_step = 1;
        }

        fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
        if (len <= SPRD_DMA_BLK_LEN_MASK) {
                block_len = len;
                transaction_len = 0;
                req_mode = SPRD_DMA_BLK_REQ;
                irq_mode = SPRD_DMA_BLK_INT;
        } else {
                block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
                transaction_len = len;
                req_mode = SPRD_DMA_TRANS_REQ;
                irq_mode = SPRD_DMA_TRANS_INT;
        }

        hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
        hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
                             SPRD_DMA_HIGH_ADDR_MASK);
        hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
                            SPRD_DMA_HIGH_ADDR_MASK);

        hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
        hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);

        /* Use fixed-address mode when exactly one side does not step. */
        if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
                fix_en = 0;
        } else {
                fix_en = 1;
                if (src_step)
                        fix_mode = 1;
                else
                        fix_mode = 0;
        }

        hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
                datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
                req_mode << SPRD_DMA_REQ_MODE_OFFSET |
                fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
                fix_en << SPRD_DMA_FIX_EN_OFFSET |
                (fragment_len & SPRD_DMA_FRG_LEN_MASK);
        hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;

        hw->intc = SPRD_DMA_CFG_ERR_INT_EN;

        switch (irq_mode) {
        case SPRD_DMA_NO_INT:
                break;

        case SPRD_DMA_FRAG_INT:
                hw->intc |= SPRD_DMA_FRAG_INT_EN;
                break;

        case SPRD_DMA_BLK_INT:
                hw->intc |= SPRD_DMA_BLK_INT_EN;
                break;

        case SPRD_DMA_BLK_FRAG_INT:
                hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
                break;

        case SPRD_DMA_TRANS_INT:
                hw->intc |= SPRD_DMA_TRANS_INT_EN;
                break;

        case SPRD_DMA_TRANS_FRAG_INT:
                hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
                break;

        case SPRD_DMA_TRANS_BLK_INT:
                hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
                break;

        case SPRD_DMA_LIST_INT:
                hw->intc |= SPRD_DMA_LIST_INT_EN;
                break;

        case SPRD_DMA_CFGERR_INT:
                hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
                break;

        default:
                dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
                return -EINVAL;
        }

        if (transaction_len == 0)
                hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
        else
                hw->trsc_len = transaction_len & SPRD_DMA_TRSC_LEN_MASK;

        hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
                        SPRD_DMA_DEST_TRSF_STEP_OFFSET |
                        (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
                        SPRD_DMA_SRC_TRSF_STEP_OFFSET;

        hw->frg_step = 0;
        hw->src_blk_step = 0;
        hw->des_blk_step = 0;
        return 0;
}
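
/*
 * A worked example of the configuration above, with hypothetical sizes
 * chosen only to exercise both branches: a 4096-byte memcpy is 4-byte
 * aligned, so datawidth = 2 (32 bits) and both steps are 4; since
 * 4096 <= SPRD_DMA_BLK_LEN_MASK (0x1ffff), it runs as a single block in
 * SPRD_DMA_BLK_REQ mode and completes on the block-done interrupt. A 256 KB
 * memcpy exceeds 0x1ffff, so it uses SPRD_DMA_TRANS_REQ mode instead:
 * block_len is pinned to SPRD_DMA_MEMCPY_MIN_SIZE (64 bytes), trsc_len
 * covers the full 256 KB, and completion is signalled by the
 * transaction-done interrupt.
 */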

static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                         size_t len, unsigned long flags)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct sprd_dma_desc *sdesc;
        int ret;

        sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
        if (!sdesc)
                return NULL;

        ret = sprd_dma_config(chan, sdesc, dest, src, len);
        if (ret) {
                kfree(sdesc);
                return NULL;
        }

        return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static int sprd_dma_pause(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_pause_resume(schan, true);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_pause_resume(schan, false);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_stop(schan);

        vchan_get_all_descriptors(&schan->vc, &head);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        vchan_dma_desc_free_list(&schan->vc, &head);
        return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
        struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

        kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 req = *(u32 *)param;

        if (req < sdev->total_chns)
                return req == schan->chn_num + 1;
        else
                return false;
}
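
/*
 * A minimal sketch of how a consumer might claim a channel through the
 * filter above; the slave id value of 2 is purely illustrative, and in
 * practice it comes from the client's "dmas" device-tree property via
 * of_dma_simple_xlate (sprd_dma_filter_fn is static, so out-of-file users
 * go through the DT path):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	u32 slave_id = 2;	// hardware request line, 1-based
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, sprd_dma_filter_fn, &slave_id);
 *	if (!chan)
 *		return -ENODEV;
 */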

static int sprd_dma_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct sprd_dma_dev *sdev;
        struct sprd_dma_chn *dma_chn;
        struct resource *res;
        u32 chn_count;
        int ret, i;

        ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
        if (ret) {
                dev_err(&pdev->dev, "get dma channels count failed\n");
                return ret;
        }

        sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) +
                            sizeof(*dma_chn) * chn_count,
                            GFP_KERNEL);
        if (!sdev)
                return -ENOMEM;

        sdev->clk = devm_clk_get(&pdev->dev, "enable");
        if (IS_ERR(sdev->clk)) {
                dev_err(&pdev->dev, "get enable clock failed\n");
                return PTR_ERR(sdev->clk);
        }

        /* ashb clock is optional for AGCP DMA */
        sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
        if (IS_ERR(sdev->ashb_clk))
                dev_warn(&pdev->dev, "no optional ashb eb clock\n");

        /*
         * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
         * AGCP DMA controller may omit its interrupt line: if it does not
         * request the irq, the system is never resumed by DMA interrupts,
         * which saves power. The DMA interrupts property is therefore
         * optional.
         */
        sdev->irq = platform_get_irq(pdev, 0);
        if (sdev->irq > 0) {
                ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
                                       0, "sprd_dma", (void *)sdev);
                if (ret < 0) {
                        dev_err(&pdev->dev, "request dma irq failed\n");
                        return ret;
                }
        } else {
                dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;

        sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
                                              resource_size(res));
        if (!sdev->glb_base)
                return -ENOMEM;

        dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
        sdev->total_chns = chn_count;
        sdev->dma_dev.chancnt = chn_count;
        INIT_LIST_HEAD(&sdev->dma_dev.channels);
        INIT_LIST_HEAD(&sdev->dma_dev.global_node);
        sdev->dma_dev.dev = &pdev->dev;
        sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
        sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
        sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
        sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
        sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
        sdev->dma_dev.device_pause = sprd_dma_pause;
        sdev->dma_dev.device_resume = sprd_dma_resume;
        sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

        for (i = 0; i < chn_count; i++) {
                dma_chn = &sdev->channels[i];
                dma_chn->chn_num = i;
                dma_chn->cur_desc = NULL;
                /* Get each channel's register base address. */
                dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
                                    SPRD_DMA_CHN_REG_LENGTH * i;

                dma_chn->vc.desc_free = sprd_dma_free_desc;
                vchan_init(&dma_chn->vc, &sdev->dma_dev);
        }

        platform_set_drvdata(pdev, sdev);
        ret = sprd_dma_enable(sdev);
        if (ret)
                return ret;

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                goto err_rpm;

        ret = dma_async_device_register(&sdev->dma_dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
                goto err_register;
        }

        sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
        ret = of_dma_controller_register(np, of_dma_simple_xlate,
                                         &sprd_dma_info);
        if (ret)
                goto err_of_register;

        pm_runtime_put(&pdev->dev);
        return 0;

err_of_register:
        dma_async_device_unregister(&sdev->dma_dev);
err_register:
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
err_rpm:
        sprd_dma_disable(sdev);
        return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
        struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
        struct sprd_dma_chn *c, *cn;
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        /* explicitly free the irq */
        if (sdev->irq > 0)
                devm_free_irq(&pdev->dev, sdev->irq, sdev);

        list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&sdev->dma_dev);
        sprd_dma_disable(sdev);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static const struct of_device_id sprd_dma_match[] = {
        { .compatible = "sprd,sc9860-dma", },
        {},
};

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
        struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

        sprd_dma_disable(sdev);
        return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
        struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
        int ret;

        ret = sprd_dma_enable(sdev);
        if (ret)
                dev_err(sdev->dma_dev.dev, "enable dma failed\n");

        return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
                           sprd_dma_runtime_resume,
                           NULL)
};

static struct platform_driver sprd_dma_driver = {
        .probe = sprd_dma_probe,
        .remove = sprd_dma_remove,
        .driver = {
                .name = "sprd-dma",
                .of_match_table = sprd_dma_match,
                .pm = &sprd_dma_pm_ops,
        },
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");