linux/drivers/dma/k3dma.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2013 - 2015 Linaro Ltd.
   4 * Copyright (c) 2013 Hisilicon Limited.
   5 */
   6#include <linux/sched.h>
   7#include <linux/device.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/dmapool.h>
  10#include <linux/dmaengine.h>
  11#include <linux/init.h>
  12#include <linux/interrupt.h>
  13#include <linux/kernel.h>
  14#include <linux/module.h>
  15#include <linux/platform_device.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/of_device.h>
  19#include <linux/of.h>
  20#include <linux/clk.h>
  21#include <linux/of_dma.h>
  22
  23#include "virt-dma.h"
  24
  25#define DRIVER_NAME             "k3-dma"
  26#define DMA_MAX_SIZE            0x1ffc
  27#define DMA_CYCLIC_MAX_PERIOD   0x1000
  28#define LLI_BLOCK_SIZE          (4 * PAGE_SIZE)
  29
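/*
 * Register layout, as used by the code below: the INT_* and CH_* registers
 * are global and addressed from the controller base; the per-channel CX_*
 * registers are reached through phy->base = d->base + idx * 0x40 (set up in
 * k3_dma_probe()), except CX_CUR_CNT, which is read at
 * d->base + CX_CUR_CNT + idx * 0x10.
 */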
  30#define INT_STAT                0x00
  31#define INT_TC1                 0x04
  32#define INT_TC2                 0x08
  33#define INT_ERR1                0x0c
  34#define INT_ERR2                0x10
  35#define INT_TC1_MASK            0x18
  36#define INT_TC2_MASK            0x1c
  37#define INT_ERR1_MASK           0x20
  38#define INT_ERR2_MASK           0x24
  39#define INT_TC1_RAW             0x600
  40#define INT_TC2_RAW             0x608
  41#define INT_ERR1_RAW            0x610
  42#define INT_ERR2_RAW            0x618
  43#define CH_PRI                  0x688
  44#define CH_STAT                 0x690
  45#define CX_CUR_CNT              0x704
  46#define CX_LLI                  0x800
  47#define CX_CNT1                 0x80c
  48#define CX_CNT0                 0x810
  49#define CX_SRC                  0x814
  50#define CX_DST                  0x818
  51#define CX_CFG                  0x81c
  52
  53#define CX_LLI_CHAIN_EN         0x2
  54#define CX_CFG_EN               0x1
  55#define CX_CFG_NODEIRQ          BIT(1)
  56#define CX_CFG_MEM2PER          (0x1 << 2)
  57#define CX_CFG_PER2MEM          (0x2 << 2)
  58#define CX_CFG_SRCINCR          (0x1 << 31)
  59#define CX_CFG_DSTINCR          (0x1 << 30)
  60
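/*
 * Hardware link-list item (LLI): the fields mirror the per-channel CX_*
 * registers programmed by k3_dma_set_desc(). 'lli' holds the bus address of
 * the next item, with CX_LLI_CHAIN_EN in its low bits; 'config' is the
 * CX_CFG value applied when the item is loaded. The 32-byte alignment
 * matches the dma_pool created in k3_dma_probe().
 */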
  61struct k3_desc_hw {
  62        u32 lli;
  63        u32 reserved[3];
  64        u32 count;
  65        u32 saddr;
  66        u32 daddr;
  67        u32 config;
  68} __aligned(32);
  69
  70struct k3_dma_desc_sw {
  71        struct virt_dma_desc    vd;
  72        dma_addr_t              desc_hw_lli;
  73        size_t                  desc_num;
  74        size_t                  size;
  75        struct k3_desc_hw       *desc_hw;
  76};
  77
  78struct k3_dma_phy;
  79
  80struct k3_dma_chan {
  81        u32                     ccfg;
  82        struct virt_dma_chan    vc;
  83        struct k3_dma_phy       *phy;
  84        struct list_head        node;
  85        dma_addr_t              dev_addr;
  86        enum dma_status         status;
  87        bool                    cyclic;
  88        struct dma_slave_config slave_config;
  89};
  90
  91struct k3_dma_phy {
  92        u32                     idx;
  93        void __iomem            *base;
  94        struct k3_dma_chan      *vchan;
  95        struct k3_dma_desc_sw   *ds_run;
  96        struct k3_dma_desc_sw   *ds_done;
  97};
  98
  99struct k3_dma_dev {
 100        struct dma_device       slave;
 101        void __iomem            *base;
 102        struct tasklet_struct   task;
 103        spinlock_t              lock;
 104        struct list_head        chan_pending;
 105        struct k3_dma_phy       *phy;
 106        struct k3_dma_chan      *chans;
 107        struct clk              *clk;
 108        struct dma_pool         *pool;
 109        u32                     dma_channels;
 110        u32                     dma_requests;
 111        u32                     dma_channel_mask;
 112        unsigned int            irq;
 113};
 114
 115
 116#define K3_FLAG_NOCLK   BIT(1)
 117
 118struct k3dma_soc_data {
 119        unsigned long flags;
 120};
 121
 122
 123#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
 124
 125static int k3_dma_config_write(struct dma_chan *chan,
 126                               enum dma_transfer_direction dir,
 127                               struct dma_slave_config *cfg);
 128
 129static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
 130{
 131        return container_of(chan, struct k3_dma_chan, vc.chan);
 132}
 133
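/*
 * Start or stop a physical channel by toggling CX_CFG_EN in its CX_CFG
 * register.
 */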
 134static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
 135{
 136        u32 val = 0;
 137
 138        if (on) {
 139                val = readl_relaxed(phy->base + CX_CFG);
 140                val |= CX_CFG_EN;
 141                writel_relaxed(val, phy->base + CX_CFG);
 142        } else {
 143                val = readl_relaxed(phy->base + CX_CFG);
 144                val &= ~CX_CFG_EN;
 145                writel_relaxed(val, phy->base + CX_CFG);
 146        }
 147}
 148
 149static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
 150{
 151        u32 val = 0;
 152
 153        k3_dma_pause_dma(phy, false);
 154
 155        val = 0x1 << phy->idx;
 156        writel_relaxed(val, d->base + INT_TC1_RAW);
 157        writel_relaxed(val, d->base + INT_TC2_RAW);
 158        writel_relaxed(val, d->base + INT_ERR1_RAW);
 159        writel_relaxed(val, d->base + INT_ERR2_RAW);
 160}
 161
 162static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
 163{
 164        writel_relaxed(hw->lli, phy->base + CX_LLI);
 165        writel_relaxed(hw->count, phy->base + CX_CNT0);
 166        writel_relaxed(hw->saddr, phy->base + CX_SRC);
 167        writel_relaxed(hw->daddr, phy->base + CX_DST);
 168        writel_relaxed(hw->config, phy->base + CX_CFG);
 169}
 170
 171static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
 172{
 173        u32 cnt = 0;
 174
 175        cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
 176        cnt &= 0xffff;
 177        return cnt;
 178}
 179
 180static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
 181{
 182        return readl_relaxed(phy->base + CX_LLI);
 183}
 184
 185static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
 186{
 187        return readl_relaxed(d->base + CH_STAT);
 188}
 189
 190static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
 191{
 192        if (on) {
 193                /* set same priority */
 194                writel_relaxed(0x0, d->base + CH_PRI);
 195
 196                /* unmask irq */
 197                writel_relaxed(0xffff, d->base + INT_TC1_MASK);
 198                writel_relaxed(0xffff, d->base + INT_TC2_MASK);
 199                writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
 200                writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
 201        } else {
 202                /* mask irq */
 203                writel_relaxed(0x0, d->base + INT_TC1_MASK);
 204                writel_relaxed(0x0, d->base + INT_TC2_MASK);
 205                writel_relaxed(0x0, d->base + INT_ERR1_MASK);
 206                writel_relaxed(0x0, d->base + INT_ERR2_MASK);
 207        }
 208}
 209
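/*
 * Interrupt handler: on TC1 the running descriptor is completed and retired;
 * on TC2 (raised at the period boundaries that k3_dma_prep_dma_cyclic()
 * marks with CX_CFG_NODEIRQ) the cyclic callback is invoked. Handled
 * channels get their raw status cleared and the tasklet is scheduled to
 * start any queued work; error bits are cleared and reported with a warning.
 */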
 210static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
 211{
 212        struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
 213        struct k3_dma_phy *p;
 214        struct k3_dma_chan *c;
 215        u32 stat = readl_relaxed(d->base + INT_STAT);
 216        u32 tc1  = readl_relaxed(d->base + INT_TC1);
 217        u32 tc2  = readl_relaxed(d->base + INT_TC2);
 218        u32 err1 = readl_relaxed(d->base + INT_ERR1);
 219        u32 err2 = readl_relaxed(d->base + INT_ERR2);
 220        u32 i, irq_chan = 0;
 221
 222        while (stat) {
 223                i = __ffs(stat);
 224                stat &= ~BIT(i);
 225                if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
 226                        unsigned long flags;
 227
 228                        p = &d->phy[i];
 229                        c = p->vchan;
 230                        if (c && (tc1 & BIT(i))) {
 231                                spin_lock_irqsave(&c->vc.lock, flags);
 232                                if (p->ds_run != NULL) {
 233                                        vchan_cookie_complete(&p->ds_run->vd);
 234                                        p->ds_done = p->ds_run;
 235                                        p->ds_run = NULL;
 236                                }
 237                                spin_unlock_irqrestore(&c->vc.lock, flags);
 238                        }
 239                        if (c && (tc2 & BIT(i))) {
 240                                spin_lock_irqsave(&c->vc.lock, flags);
 241                                if (p->ds_run != NULL)
 242                                        vchan_cyclic_callback(&p->ds_run->vd);
 243                                spin_unlock_irqrestore(&c->vc.lock, flags);
 244                        }
 245                        irq_chan |= BIT(i);
 246                }
 247                if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
  248                        dev_warn(d->slave.dev, "DMA error on channel %u\n", i);
 249        }
 250
 251        writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
 252        writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
 253        writel_relaxed(err1, d->base + INT_ERR1_RAW);
 254        writel_relaxed(err2, d->base + INT_ERR2_RAW);
 255
 256        if (irq_chan)
 257                tasklet_schedule(&d->task);
 258
 259        if (irq_chan || err1 || err2)
 260                return IRQ_HANDLED;
 261
 262        return IRQ_NONE;
 263}
 264
 265static int k3_dma_start_txd(struct k3_dma_chan *c)
 266{
 267        struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
 268        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
 269
 270        if (!c->phy)
 271                return -EAGAIN;
 272
 273        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
 274                return -EAGAIN;
 275
  276        /* Avoid losing track of ds_run if a transaction is in flight */
 277        if (c->phy->ds_run)
 278                return -EAGAIN;
 279
 280        if (vd) {
 281                struct k3_dma_desc_sw *ds =
 282                        container_of(vd, struct k3_dma_desc_sw, vd);
 283                /*
 284                 * fetch and remove request from vc->desc_issued
 285                 * so vc->desc_issued only contains desc pending
 286                 */
 287                list_del(&ds->vd.node);
 288
 289                c->phy->ds_run = ds;
 290                c->phy->ds_done = NULL;
 291                /* start dma */
 292                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
 293                return 0;
 294        }
 295        c->phy->ds_run = NULL;
 296        c->phy->ds_done = NULL;
 297        return -EAGAIN;
 298}
 299
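/*
 * Deferred scheduler: retire physical channels whose descriptor has
 * completed (restarting them if more work has been issued), then hand any
 * free physical channel allowed by dma_channel_mask to the next virtual
 * channel waiting on d->chan_pending and start its first descriptor.
 */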
 300static void k3_dma_tasklet(unsigned long arg)
 301{
 302        struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
 303        struct k3_dma_phy *p;
 304        struct k3_dma_chan *c, *cn;
 305        unsigned pch, pch_alloc = 0;
 306
  307        /* check for new DMA requests on running channels in vc->desc_issued */
 308        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
 309                spin_lock_irq(&c->vc.lock);
 310                p = c->phy;
 311                if (p && p->ds_done) {
 312                        if (k3_dma_start_txd(c)) {
 313                                /* No current txd associated with this channel */
 314                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
 315                                /* Mark this channel free */
 316                                c->phy = NULL;
 317                                p->vchan = NULL;
 318                        }
 319                }
 320                spin_unlock_irq(&c->vc.lock);
 321        }
 322
  323        /* check for new channel requests in d->chan_pending */
 324        spin_lock_irq(&d->lock);
 325        for (pch = 0; pch < d->dma_channels; pch++) {
 326                if (!(d->dma_channel_mask & (1 << pch)))
 327                        continue;
 328
 329                p = &d->phy[pch];
 330
 331                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
 332                        c = list_first_entry(&d->chan_pending,
 333                                struct k3_dma_chan, node);
 334                        /* remove from d->chan_pending */
 335                        list_del_init(&c->node);
 336                        pch_alloc |= 1 << pch;
 337                        /* Mark this channel allocated */
 338                        p->vchan = c;
 339                        c->phy = p;
 340                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
 341                }
 342        }
 343        spin_unlock_irq(&d->lock);
 344
 345        for (pch = 0; pch < d->dma_channels; pch++) {
 346                if (!(d->dma_channel_mask & (1 << pch)))
 347                        continue;
 348
 349                if (pch_alloc & (1 << pch)) {
 350                        p = &d->phy[pch];
 351                        c = p->vchan;
 352                        if (c) {
 353                                spin_lock_irq(&c->vc.lock);
 354                                k3_dma_start_txd(c);
 355                                spin_unlock_irq(&c->vc.lock);
 356                        }
 357                }
 358        }
 359}
 360
 361static void k3_dma_free_chan_resources(struct dma_chan *chan)
 362{
 363        struct k3_dma_chan *c = to_k3_chan(chan);
 364        struct k3_dma_dev *d = to_k3_dma(chan->device);
 365        unsigned long flags;
 366
 367        spin_lock_irqsave(&d->lock, flags);
 368        list_del_init(&c->node);
 369        spin_unlock_irqrestore(&d->lock, flags);
 370
 371        vchan_free_chan_resources(&c->vc);
 372        c->ccfg = 0;
 373}
 374
 375static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
 376        dma_cookie_t cookie, struct dma_tx_state *state)
 377{
 378        struct k3_dma_chan *c = to_k3_chan(chan);
 379        struct k3_dma_dev *d = to_k3_dma(chan->device);
 380        struct k3_dma_phy *p;
 381        struct virt_dma_desc *vd;
 382        unsigned long flags;
 383        enum dma_status ret;
 384        size_t bytes = 0;
 385
 386        ret = dma_cookie_status(&c->vc.chan, cookie, state);
 387        if (ret == DMA_COMPLETE)
 388                return ret;
 389
 390        spin_lock_irqsave(&c->vc.lock, flags);
 391        p = c->phy;
 392        ret = c->status;
 393
 394        /*
 395         * If the cookie is on our issue queue, then the residue is
 396         * its total size.
 397         */
 398        vd = vchan_find_desc(&c->vc, cookie);
 399        if (vd && !c->cyclic) {
 400                bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
 401        } else if ((!p) || (!p->ds_run)) {
 402                bytes = 0;
 403        } else {
 404                struct k3_dma_desc_sw *ds = p->ds_run;
 405                u32 clli = 0, index = 0;
 406
 407                bytes = k3_dma_get_curr_cnt(d, p);
 408                clli = k3_dma_get_curr_lli(p);
 409                index = ((clli - ds->desc_hw_lli) /
 410                                sizeof(struct k3_desc_hw)) + 1;
 411                for (; index < ds->desc_num; index++) {
 412                        bytes += ds->desc_hw[index].count;
 413                        /* end of lli */
 414                        if (!ds->desc_hw[index].lli)
 415                                break;
 416                }
 417        }
 418        spin_unlock_irqrestore(&c->vc.lock, flags);
 419        dma_set_residue(state, bytes);
 420        return ret;
 421}
 422
 423static void k3_dma_issue_pending(struct dma_chan *chan)
 424{
 425        struct k3_dma_chan *c = to_k3_chan(chan);
 426        struct k3_dma_dev *d = to_k3_dma(chan->device);
 427        unsigned long flags;
 428
 429        spin_lock_irqsave(&c->vc.lock, flags);
 430        /* add request to vc->desc_issued */
 431        if (vchan_issue_pending(&c->vc)) {
 432                spin_lock(&d->lock);
 433                if (!c->phy) {
 434                        if (list_empty(&c->node)) {
  435                                /* not pending yet, queue on chan_pending */
  436                                list_add_tail(&c->node, &d->chan_pending);
  437                                /* let the tasklet assign a physical channel */
 438                                tasklet_schedule(&d->task);
 439                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
 440                        }
 441                }
 442                spin_unlock(&d->lock);
 443        } else
 444                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
 445        spin_unlock_irqrestore(&c->vc.lock, flags);
 446}
 447
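/*
 * Fill one LLI entry; every entry except the last is chained (with
 * CX_LLI_CHAIN_EN) to the next one in the same dma_pool block. Callers
 * finish the chain themselves: prep_memcpy/prep_slave_sg zero the last
 * entry's 'lli' to end the transfer, while prep_dma_cyclic points it back
 * at the first entry.
 */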
 448static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
 449                        dma_addr_t src, size_t len, u32 num, u32 ccfg)
 450{
 451        if (num != ds->desc_num - 1)
 452                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
 453                        sizeof(struct k3_desc_hw);
 454
 455        ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
 456        ds->desc_hw[num].count = len;
 457        ds->desc_hw[num].saddr = src;
 458        ds->desc_hw[num].daddr = dst;
 459        ds->desc_hw[num].config = ccfg;
 460}
 461
 462static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
 463                                                        struct dma_chan *chan)
 464{
 465        struct k3_dma_chan *c = to_k3_chan(chan);
 466        struct k3_dma_desc_sw *ds;
 467        struct k3_dma_dev *d = to_k3_dma(chan->device);
 468        int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
 469
 470        if (num > lli_limit) {
 471                dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
 472                        &c->vc, num, lli_limit);
 473                return NULL;
 474        }
 475
 476        ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
 477        if (!ds)
 478                return NULL;
 479
 480        ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
 481        if (!ds->desc_hw) {
 482                dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
 483                kfree(ds);
 484                return NULL;
 485        }
 486        ds->desc_num = num;
 487        return ds;
 488}
 489
 490static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 491        struct dma_chan *chan,  dma_addr_t dst, dma_addr_t src,
 492        size_t len, unsigned long flags)
 493{
 494        struct k3_dma_chan *c = to_k3_chan(chan);
 495        struct k3_dma_desc_sw *ds;
 496        size_t copy = 0;
 497        int num = 0;
 498
 499        if (!len)
 500                return NULL;
 501
 502        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
 503
 504        ds = k3_dma_alloc_desc_resource(num, chan);
 505        if (!ds)
 506                return NULL;
 507
 508        c->cyclic = 0;
 509        ds->size = len;
 510        num = 0;
 511
 512        if (!c->ccfg) {
  513                /* default is mem-to-mem when device_config has not been called */
 514                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
 515                c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
 516                c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
 517        }
 518
 519        do {
 520                copy = min_t(size_t, len, DMA_MAX_SIZE);
 521                k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
 522
 523                src += copy;
 524                dst += copy;
 525                len -= copy;
 526        } while (len);
 527
 528        ds->desc_hw[num-1].lli = 0;     /* end of link */
 529        return vchan_tx_prep(&c->vc, &ds->vd, flags);
 530}
 531
 532static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 533        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
 534        enum dma_transfer_direction dir, unsigned long flags, void *context)
 535{
 536        struct k3_dma_chan *c = to_k3_chan(chan);
 537        struct k3_dma_desc_sw *ds;
 538        size_t len, avail, total = 0;
 539        struct scatterlist *sg;
 540        dma_addr_t addr, src = 0, dst = 0;
 541        int num = sglen, i;
 542
 543        if (sgl == NULL)
 544                return NULL;
 545
 546        c->cyclic = 0;
 547
 548        for_each_sg(sgl, sg, sglen, i) {
 549                avail = sg_dma_len(sg);
 550                if (avail > DMA_MAX_SIZE)
 551                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
 552        }
 553
 554        ds = k3_dma_alloc_desc_resource(num, chan);
 555        if (!ds)
 556                return NULL;
 557        num = 0;
 558        k3_dma_config_write(chan, dir, &c->slave_config);
 559
 560        for_each_sg(sgl, sg, sglen, i) {
 561                addr = sg_dma_address(sg);
 562                avail = sg_dma_len(sg);
 563                total += avail;
 564
 565                do {
 566                        len = min_t(size_t, avail, DMA_MAX_SIZE);
 567
 568                        if (dir == DMA_MEM_TO_DEV) {
 569                                src = addr;
 570                                dst = c->dev_addr;
 571                        } else if (dir == DMA_DEV_TO_MEM) {
 572                                src = c->dev_addr;
 573                                dst = addr;
 574                        }
 575
 576                        k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
 577
 578                        addr += len;
 579                        avail -= len;
 580                } while (avail);
 581        }
 582
 583        ds->desc_hw[num-1].lli = 0;     /* end of link */
 584        ds->size = total;
 585        return vchan_tx_prep(&c->vc, &ds->vd, flags);
 586}
 587
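/*
 * Cyclic transfers: the buffer is cut into chunks of at most
 * DMA_CYCLIC_MAX_PERIOD (or period_len) bytes, the chunk that closes each
 * period is tagged with CX_CFG_NODEIRQ so a TC2 interrupt fires there, and
 * the final LLI links back to the first so the transfer loops until
 * terminated.
 */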
 588static struct dma_async_tx_descriptor *
 589k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 590                       size_t buf_len, size_t period_len,
 591                       enum dma_transfer_direction dir,
 592                       unsigned long flags)
 593{
 594        struct k3_dma_chan *c = to_k3_chan(chan);
 595        struct k3_dma_desc_sw *ds;
 596        size_t len, avail, total = 0;
 597        dma_addr_t addr, src = 0, dst = 0;
 598        int num = 1, since = 0;
 599        size_t modulo = DMA_CYCLIC_MAX_PERIOD;
 600        u32 en_tc2 = 0;
 601
 602        dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
 603               __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
 604               buf_len, period_len, (int)dir);
 605
 606        avail = buf_len;
 607        if (avail > modulo)
 608                num += DIV_ROUND_UP(avail, modulo) - 1;
 609
 610        ds = k3_dma_alloc_desc_resource(num, chan);
 611        if (!ds)
 612                return NULL;
 613
 614        c->cyclic = 1;
 615        addr = buf_addr;
 616        avail = buf_len;
 617        total = avail;
 618        num = 0;
 619        k3_dma_config_write(chan, dir, &c->slave_config);
 620
 621        if (period_len < modulo)
 622                modulo = period_len;
 623
 624        do {
 625                len = min_t(size_t, avail, modulo);
 626
 627                if (dir == DMA_MEM_TO_DEV) {
 628                        src = addr;
 629                        dst = c->dev_addr;
 630                } else if (dir == DMA_DEV_TO_MEM) {
 631                        src = c->dev_addr;
 632                        dst = addr;
 633                }
 634                since += len;
 635                if (since >= period_len) {
 636                        /* descriptor asks for TC2 interrupt on completion */
 637                        en_tc2 = CX_CFG_NODEIRQ;
 638                        since -= period_len;
 639                } else
 640                        en_tc2 = 0;
 641
 642                k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
 643
 644                addr += len;
 645                avail -= len;
 646        } while (avail);
 647
 648        /* "Cyclic" == end of link points back to start of link */
 649        ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
 650
 651        ds->size = total;
 652
 653        return vchan_tx_prep(&c->vc, &ds->vd, flags);
 654}
 655
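/*
 * Illustrative only: clients do not call the hooks below directly but go
 * through the generic dmaengine API. A minimal consumer-side sketch (the
 * names chan/desc/fifo_phys/buf_dma/len are placeholders, not part of this
 * driver):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */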
 656static int k3_dma_config(struct dma_chan *chan,
 657                         struct dma_slave_config *cfg)
 658{
 659        struct k3_dma_chan *c = to_k3_chan(chan);
 660
 661        memcpy(&c->slave_config, cfg, sizeof(*cfg));
 662
 663        return 0;
 664}
 665
 666static int k3_dma_config_write(struct dma_chan *chan,
 667                               enum dma_transfer_direction dir,
 668                               struct dma_slave_config *cfg)
 669{
 670        struct k3_dma_chan *c = to_k3_chan(chan);
 671        u32 maxburst = 0, val = 0;
 672        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 673
 674        if (dir == DMA_DEV_TO_MEM) {
 675                c->ccfg = CX_CFG_DSTINCR;
 676                c->dev_addr = cfg->src_addr;
 677                maxburst = cfg->src_maxburst;
 678                width = cfg->src_addr_width;
 679        } else if (dir == DMA_MEM_TO_DEV) {
 680                c->ccfg = CX_CFG_SRCINCR;
 681                c->dev_addr = cfg->dst_addr;
 682                maxburst = cfg->dst_maxburst;
 683                width = cfg->dst_addr_width;
 684        }
 685        switch (width) {
 686        case DMA_SLAVE_BUSWIDTH_1_BYTE:
 687        case DMA_SLAVE_BUSWIDTH_2_BYTES:
 688        case DMA_SLAVE_BUSWIDTH_4_BYTES:
 689        case DMA_SLAVE_BUSWIDTH_8_BYTES:
  690                val = __ffs(width);
 691                break;
 692        default:
 693                val = 3;
 694                break;
 695        }
 696        c->ccfg |= (val << 12) | (val << 16);
 697
 698        if ((maxburst == 0) || (maxburst > 16))
 699                val = 15;
 700        else
 701                val = maxburst - 1;
 702        c->ccfg |= (val << 20) | (val << 24);
 703        c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
 704
 705        /* specific request line */
 706        c->ccfg |= c->vc.chan.chan_id << 4;
 707
 708        return 0;
 709}
 710
 711static void k3_dma_free_desc(struct virt_dma_desc *vd)
 712{
 713        struct k3_dma_desc_sw *ds =
 714                container_of(vd, struct k3_dma_desc_sw, vd);
 715        struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
 716
 717        dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
 718        kfree(ds);
 719}
 720
 721static int k3_dma_terminate_all(struct dma_chan *chan)
 722{
 723        struct k3_dma_chan *c = to_k3_chan(chan);
 724        struct k3_dma_dev *d = to_k3_dma(chan->device);
 725        struct k3_dma_phy *p = c->phy;
 726        unsigned long flags;
 727        LIST_HEAD(head);
 728
 729        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 730
  731        /* Prevent this channel from being scheduled */
 732        spin_lock(&d->lock);
 733        list_del_init(&c->node);
 734        spin_unlock(&d->lock);
 735
 736        /* Clear the tx descriptor lists */
 737        spin_lock_irqsave(&c->vc.lock, flags);
 738        vchan_get_all_descriptors(&c->vc, &head);
 739        if (p) {
 740                /* vchan is assigned to a pchan - stop the channel */
 741                k3_dma_terminate_chan(p, d);
 742                c->phy = NULL;
 743                p->vchan = NULL;
 744                if (p->ds_run) {
 745                        vchan_terminate_vdesc(&p->ds_run->vd);
 746                        p->ds_run = NULL;
 747                }
 748                p->ds_done = NULL;
 749        }
 750        spin_unlock_irqrestore(&c->vc.lock, flags);
 751        vchan_dma_desc_free_list(&c->vc, &head);
 752
 753        return 0;
 754}
 755
 756static void k3_dma_synchronize(struct dma_chan *chan)
 757{
 758        struct k3_dma_chan *c = to_k3_chan(chan);
 759
 760        vchan_synchronize(&c->vc);
 761}
 762
 763static int k3_dma_transfer_pause(struct dma_chan *chan)
 764{
 765        struct k3_dma_chan *c = to_k3_chan(chan);
 766        struct k3_dma_dev *d = to_k3_dma(chan->device);
 767        struct k3_dma_phy *p = c->phy;
 768
 769        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
 770        if (c->status == DMA_IN_PROGRESS) {
 771                c->status = DMA_PAUSED;
 772                if (p) {
 773                        k3_dma_pause_dma(p, false);
 774                } else {
 775                        spin_lock(&d->lock);
 776                        list_del_init(&c->node);
 777                        spin_unlock(&d->lock);
 778                }
 779        }
 780
 781        return 0;
 782}
 783
 784static int k3_dma_transfer_resume(struct dma_chan *chan)
 785{
 786        struct k3_dma_chan *c = to_k3_chan(chan);
 787        struct k3_dma_dev *d = to_k3_dma(chan->device);
 788        struct k3_dma_phy *p = c->phy;
 789        unsigned long flags;
 790
 791        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
 792        spin_lock_irqsave(&c->vc.lock, flags);
 793        if (c->status == DMA_PAUSED) {
 794                c->status = DMA_IN_PROGRESS;
 795                if (p) {
 796                        k3_dma_pause_dma(p, true);
 797                } else if (!list_empty(&c->vc.desc_issued)) {
 798                        spin_lock(&d->lock);
 799                        list_add_tail(&c->node, &d->chan_pending);
 800                        spin_unlock(&d->lock);
 801                }
 802        }
 803        spin_unlock_irqrestore(&c->vc.lock, flags);
 804
 805        return 0;
 806}
 807
 808static const struct k3dma_soc_data k3_v1_dma_data = {
 809        .flags = 0,
 810};
 811
 812static const struct k3dma_soc_data asp_v1_dma_data = {
 813        .flags = K3_FLAG_NOCLK,
 814};
 815
 816static const struct of_device_id k3_pdma_dt_ids[] = {
 817        { .compatible = "hisilicon,k3-dma-1.0",
 818          .data = &k3_v1_dma_data
 819        },
 820        { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
 821          .data = &asp_v1_dma_data
 822        },
 823        {}
 824};
 825MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
 826
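/*
 * Devicetree translation: a client node typically references a channel as
 * "dmas = <&dma0 request_line>;" and the single cell is used here as an
 * index into d->chans, i.e. one virtual channel per hardware request line.
 */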
 827static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 828                                                struct of_dma *ofdma)
 829{
 830        struct k3_dma_dev *d = ofdma->of_dma_data;
 831        unsigned int request = dma_spec->args[0];
 832
 833        if (request >= d->dma_requests)
 834                return NULL;
 835
 836        return dma_get_slave_channel(&(d->chans[request].vc.chan));
 837}
 838
 839static int k3_dma_probe(struct platform_device *op)
 840{
 841        const struct k3dma_soc_data *soc_data;
 842        struct k3_dma_dev *d;
 843        const struct of_device_id *of_id;
 844        int i, ret, irq = 0;
 845
 846        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
 847        if (!d)
 848                return -ENOMEM;
 849
 850        soc_data = device_get_match_data(&op->dev);
 851        if (!soc_data)
 852                return -EINVAL;
 853
 854        d->base = devm_platform_ioremap_resource(op, 0);
 855        if (IS_ERR(d->base))
 856                return PTR_ERR(d->base);
 857
 858        of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
 859        if (of_id) {
 860                of_property_read_u32((&op->dev)->of_node,
 861                                "dma-channels", &d->dma_channels);
 862                of_property_read_u32((&op->dev)->of_node,
 863                                "dma-requests", &d->dma_requests);
 864                ret = of_property_read_u32((&op->dev)->of_node,
 865                                "dma-channel-mask", &d->dma_channel_mask);
 866                if (ret) {
 867                        dev_warn(&op->dev,
 868                                 "dma-channel-mask doesn't exist, considering all as available.\n");
 869                        d->dma_channel_mask = (u32)~0UL;
 870                }
 871        }
 872
 873        if (!(soc_data->flags & K3_FLAG_NOCLK)) {
 874                d->clk = devm_clk_get(&op->dev, NULL);
 875                if (IS_ERR(d->clk)) {
 876                        dev_err(&op->dev, "no dma clk\n");
 877                        return PTR_ERR(d->clk);
 878                }
 879        }
 880
 881        irq = platform_get_irq(op, 0);
 882        ret = devm_request_irq(&op->dev, irq,
 883                        k3_dma_int_handler, 0, DRIVER_NAME, d);
 884        if (ret)
 885                return ret;
 886
 887        d->irq = irq;
 888
  889        /* A DMA memory pool for LLIs, aligned to a 32-byte boundary */
 890        d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
 891                                        LLI_BLOCK_SIZE, 32, 0);
 892        if (!d->pool)
 893                return -ENOMEM;
 894
 895        /* init phy channel */
 896        d->phy = devm_kcalloc(&op->dev,
 897                d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
 898        if (d->phy == NULL)
 899                return -ENOMEM;
 900
 901        for (i = 0; i < d->dma_channels; i++) {
 902                struct k3_dma_phy *p;
 903
 904                if (!(d->dma_channel_mask & BIT(i)))
 905                        continue;
 906
 907                p = &d->phy[i];
 908                p->idx = i;
 909                p->base = d->base + i * 0x40;
 910        }
 911
 912        INIT_LIST_HEAD(&d->slave.channels);
 913        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
 914        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
 915        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
 916        d->slave.dev = &op->dev;
 917        d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
 918        d->slave.device_tx_status = k3_dma_tx_status;
 919        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
 920        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
 921        d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
 922        d->slave.device_issue_pending = k3_dma_issue_pending;
 923        d->slave.device_config = k3_dma_config;
 924        d->slave.device_pause = k3_dma_transfer_pause;
 925        d->slave.device_resume = k3_dma_transfer_resume;
 926        d->slave.device_terminate_all = k3_dma_terminate_all;
 927        d->slave.device_synchronize = k3_dma_synchronize;
 928        d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
 929
 930        /* init virtual channel */
 931        d->chans = devm_kcalloc(&op->dev,
 932                d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
 933        if (d->chans == NULL)
 934                return -ENOMEM;
 935
 936        for (i = 0; i < d->dma_requests; i++) {
 937                struct k3_dma_chan *c = &d->chans[i];
 938
 939                c->status = DMA_IN_PROGRESS;
 940                INIT_LIST_HEAD(&c->node);
 941                c->vc.desc_free = k3_dma_free_desc;
 942                vchan_init(&c->vc, &d->slave);
 943        }
 944
 945        /* Enable clock before accessing registers */
 946        ret = clk_prepare_enable(d->clk);
 947        if (ret < 0) {
 948                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
 949                return ret;
 950        }
 951
 952        k3_dma_enable_dma(d, true);
 953
 954        ret = dma_async_device_register(&d->slave);
 955        if (ret)
 956                goto dma_async_register_fail;
 957
 958        ret = of_dma_controller_register((&op->dev)->of_node,
 959                                        k3_of_dma_simple_xlate, d);
 960        if (ret)
 961                goto of_dma_register_fail;
 962
 963        spin_lock_init(&d->lock);
 964        INIT_LIST_HEAD(&d->chan_pending);
 965        tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
 966        platform_set_drvdata(op, d);
 967        dev_info(&op->dev, "initialized\n");
 968
 969        return 0;
 970
 971of_dma_register_fail:
 972        dma_async_device_unregister(&d->slave);
 973dma_async_register_fail:
 974        clk_disable_unprepare(d->clk);
 975        return ret;
 976}
 977
 978static int k3_dma_remove(struct platform_device *op)
 979{
 980        struct k3_dma_chan *c, *cn;
 981        struct k3_dma_dev *d = platform_get_drvdata(op);
 982
 983        dma_async_device_unregister(&d->slave);
 984        of_dma_controller_free((&op->dev)->of_node);
 985
 986        devm_free_irq(&op->dev, d->irq, d);
 987
 988        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
 989                list_del(&c->vc.chan.device_node);
 990                tasklet_kill(&c->vc.task);
 991        }
 992        tasklet_kill(&d->task);
 993        clk_disable_unprepare(d->clk);
 994        return 0;
 995}
 996
 997#ifdef CONFIG_PM_SLEEP
 998static int k3_dma_suspend_dev(struct device *dev)
 999{
1000        struct k3_dma_dev *d = dev_get_drvdata(dev);
1001        u32 stat = 0;
1002
1003        stat = k3_dma_get_chan_stat(d);
 1004        if (stat) {
 1005                dev_warn(d->slave.dev,
 1006                        "channel(s) still busy (stat 0x%x), cannot suspend\n", stat);
 1007                return -EBUSY;
 1008        }
1009        k3_dma_enable_dma(d, false);
1010        clk_disable_unprepare(d->clk);
1011        return 0;
1012}
1013
1014static int k3_dma_resume_dev(struct device *dev)
1015{
1016        struct k3_dma_dev *d = dev_get_drvdata(dev);
1017        int ret = 0;
1018
1019        ret = clk_prepare_enable(d->clk);
1020        if (ret < 0) {
1021                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
1022                return ret;
1023        }
1024        k3_dma_enable_dma(d, true);
1025        return 0;
1026}
1027#endif
1028
1029static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
1030
1031static struct platform_driver k3_pdma_driver = {
1032        .driver         = {
1033                .name   = DRIVER_NAME,
1034                .pm     = &k3_dma_pmops,
1035                .of_match_table = k3_pdma_dt_ids,
1036        },
1037        .probe          = k3_dma_probe,
1038        .remove         = k3_dma_remove,
1039};
1040
1041module_platform_driver(k3_pdma_driver);
1042
1043MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
1044MODULE_ALIAS("platform:k3dma");
1045MODULE_LICENSE("GPL v2");
1046