linux/drivers/dma/k3dma.c
/*
 * Copyright (c) 2013 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME             "k3-dma"
#define DMA_MAX_SIZE            0x1ffc

#define INT_STAT                0x00
#define INT_TC1                 0x04
#define INT_ERR1                0x0c
#define INT_ERR2                0x10
#define INT_TC1_MASK            0x18
#define INT_ERR1_MASK           0x20
#define INT_ERR2_MASK           0x24
#define INT_TC1_RAW             0x600
#define INT_ERR1_RAW            0x608
#define INT_ERR2_RAW            0x610
#define CH_PRI                  0x688
#define CH_STAT                 0x690
#define CX_CUR_CNT              0x704
#define CX_LLI                  0x800
#define CX_CNT                  0x810
#define CX_SRC                  0x814
#define CX_DST                  0x818
#define CX_CFG                  0x81c
#define AXI_CFG                 0x820
#define AXI_CFG_DEFAULT         0x201201

#define CX_LLI_CHAIN_EN         0x2
#define CX_CFG_EN               0x1
#define CX_CFG_MEM2PER          (0x1 << 2)
#define CX_CFG_PER2MEM          (0x2 << 2)
#define CX_CFG_SRCINCR          (0x1 << 31)
#define CX_CFG_DSTINCR          (0x1 << 30)

struct k3_desc_hw {
        u32 lli;
        u32 reserved[3];
        u32 count;
        u32 saddr;
        u32 daddr;
        u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
        struct virt_dma_desc    vd;
        dma_addr_t              desc_hw_lli;
        size_t                  desc_num;
        size_t                  size;
        struct k3_desc_hw       desc_hw[0];
};

struct k3_dma_phy;

struct k3_dma_chan {
        u32                     ccfg;
        struct virt_dma_chan    vc;
        struct k3_dma_phy       *phy;
        struct list_head        node;
        enum dma_transfer_direction dir;
        dma_addr_t              dev_addr;
        enum dma_status         status;
};

struct k3_dma_phy {
        u32                     idx;
        void __iomem            *base;
        struct k3_dma_chan      *vchan;
        struct k3_dma_desc_sw   *ds_run;
        struct k3_dma_desc_sw   *ds_done;
};

struct k3_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        struct tasklet_struct   task;
        spinlock_t              lock;
        struct list_head        chan_pending;
        struct k3_dma_phy       *phy;
        struct k3_dma_chan      *chans;
        struct clk              *clk;
        u32                     dma_channels;
        u32                     dma_requests;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
        return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
        u32 val = 0;

        if (on) {
                val = readl_relaxed(phy->base + CX_CFG);
                val |= CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        } else {
                val = readl_relaxed(phy->base + CX_CFG);
                val &= ~CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        }
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
        u32 val = 0;

        k3_dma_pause_dma(phy, false);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + INT_TC1_RAW);
        writel_relaxed(val, d->base + INT_ERR1_RAW);
        writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
        writel_relaxed(hw->lli, phy->base + CX_LLI);
        writel_relaxed(hw->count, phy->base + CX_CNT);
        writel_relaxed(hw->saddr, phy->base + CX_SRC);
        writel_relaxed(hw->daddr, phy->base + CX_DST);
        writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
        writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
        u32 cnt = 0;

        cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
        cnt &= 0xffff;
        return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
        return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
        return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
        if (on) {
                /* set same priority */
                writel_relaxed(0x0, d->base + CH_PRI);

                /* unmask irq */
                writel_relaxed(0xffff, d->base + INT_TC1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
        } else {
                /* mask irq */
                writel_relaxed(0x0, d->base + INT_TC1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR2_MASK);
        }
}

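/*
 * Interrupt handler: walk INT_STAT, complete the descriptor running on
 * each physical channel that raised TC1, warn on ERR1/ERR2, clear the
 * raw status bits and kick the tasklet to schedule follow-up work.
 */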
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c;
        u32 stat = readl_relaxed(d->base + INT_STAT);
        u32 tc1  = readl_relaxed(d->base + INT_TC1);
        u32 err1 = readl_relaxed(d->base + INT_ERR1);
        u32 err2 = readl_relaxed(d->base + INT_ERR2);
        u32 i, irq_chan = 0;

        while (stat) {
                i = __ffs(stat);
                stat &= (stat - 1);
                if (likely(tc1 & BIT(i))) {
                        p = &d->phy[i];
                        c = p->vchan;
                        if (c) {
                                unsigned long flags;

                                spin_lock_irqsave(&c->vc.lock, flags);
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        irq_chan |= BIT(i);
                }
                if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
                        dev_warn(d->slave.dev, "DMA ERR\n");
        }

        writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
        writel_relaxed(err1, d->base + INT_ERR1_RAW);
        writel_relaxed(err2, d->base + INT_ERR2_RAW);

        if (irq_chan) {
                tasklet_schedule(&d->task);
                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
}

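/*
 * Fetch the next issued descriptor from the virtual channel and program
 * it onto the assigned physical channel.  Returns -EAGAIN if no physical
 * channel is bound, the channel is still busy, or nothing is issued.
 * Called with the vchan lock held.
 */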
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
        struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);
                c->phy->ds_run = ds;
                c->phy->ds_done = NULL;
                /* start dma */
                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                return 0;
        }
        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
}

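/*
 * Tasklet: restart or release physical channels whose descriptor has
 * completed, then hand free physical channels to virtual channels
 * waiting on d->chan_pending and start their transfers.
 */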
static void k3_dma_tasklet(unsigned long arg)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c, *cn;
        unsigned pch, pch_alloc = 0;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && p->ds_done) {
                        if (k3_dma_start_txd(c)) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irq(&d->lock);
        for (pch = 0; pch < d->dma_channels; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct k3_dma_chan, node);
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << pch;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irq(&c->vc.lock);
                                k3_dma_start_txd(c);
                                spin_unlock_irq(&c->vc.lock);
                        }
                }
        }
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}

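/*
 * Report the state of a cookie.  The residue is the full descriptor size
 * while it still sits on the issued list; for the running descriptor it
 * is the hardware count of the current LLI plus the counts of the LLIs
 * not yet executed.
 */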
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct k3_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = k3_dma_get_curr_cnt(d, p);
                clli = k3_dma_get_curr_lli(p);
                index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].count;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy) {
                        if (list_empty(&c->node)) {
                                /* if new channel, add chan_pending */
                                list_add_tail(&c->node, &d->chan_pending);
                                /* check in tasklet */
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                }
                spin_unlock(&d->lock);
        } else
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

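/*
 * Fill one hardware LLI entry; entries are chained to the following entry
 * with CX_LLI_CHAIN_EN, and the caller clears the link of the final entry.
 */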
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
                        dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct k3_desc_hw);
        ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
        ds->desc_hw[num].count = len;
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].config = ccfg;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
        struct dma_chan *chan,  dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->size = len;
        ds->desc_num = num;
        num = 0;

        if (!c->ccfg) {
                /* default is memtomem, without calling device_config */
                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
                c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
                c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
        }

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                if (c->dir == DMA_MEM_TO_DEV) {
                        src += copy;
                } else if (c->dir == DMA_DEV_TO_MEM) {
                        dst += copy;
                } else {
                        src += copy;
                        dst += copy;
                }
                len -= copy;
        } while (len);

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (sgl == NULL)
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->desc_num = num;
        num = 0;

        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

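/*
 * Set up a slave channel from dma_slave_config: direction, device
 * address, bus width, burst length and the request line are folded into
 * the CX_CFG value used when descriptors are built.
 */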
static int k3_dma_config(struct dma_chan *chan,
                         struct dma_slave_config *cfg)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        u32 maxburst = 0, val = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (cfg == NULL)
                return -EINVAL;
        c->dir = cfg->direction;
        if (c->dir == DMA_DEV_TO_MEM) {
                c->ccfg = CX_CFG_DSTINCR;
                c->dev_addr = cfg->src_addr;
                maxburst = cfg->src_maxburst;
                width = cfg->src_addr_width;
        } else if (c->dir == DMA_MEM_TO_DEV) {
                c->ccfg = CX_CFG_SRCINCR;
                c->dev_addr = cfg->dst_addr;
                maxburst = cfg->dst_maxburst;
                width = cfg->dst_addr_width;
        }
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                val =  __ffs(width);
                break;
        default:
                val = 3;
                break;
        }
        c->ccfg |= (val << 12) | (val << 16);

        if ((maxburst == 0) || (maxburst > 16))
                val = 16;
        else
                val = maxburst - 1;
        c->ccfg |= (val << 20) | (val << 24);
        c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

        /* specific request line */
        c->ccfg |= c->vc.chan.chan_id << 4;

        return 0;
}

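/*
 * Abort every transfer on the channel: drop it from the pending list,
 * stop and clear the physical channel if one is assigned, then free all
 * queued descriptors.
 */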
static int k3_dma_terminate_all(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                k3_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;

        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
        if (c->status == DMA_IN_PROGRESS) {
                c->status = DMA_PAUSED;
                if (p) {
                        k3_dma_pause_dma(p, false);
                } else {
                        spin_lock(&d->lock);
                        list_del_init(&c->node);
                        spin_unlock(&d->lock);
                }
        }

        return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_PAUSED) {
                c->status = DMA_IN_PROGRESS;
                if (p) {
                        k3_dma_pause_dma(p, true);
                } else if (!list_empty(&c->vc.desc_issued)) {
                        spin_lock(&d->lock);
                        list_add_tail(&c->node, &d->chan_pending);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
        struct k3_dma_desc_sw *ds =
                container_of(vd, struct k3_dma_desc_sw, vd);

        kfree(ds);
}

static const struct of_device_id k3_pdma_dt_ids[] = {
        { .compatible = "hisilicon,k3-dma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
                                                struct of_dma *ofdma)
{
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request > d->dma_requests)
                return NULL;

        return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

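/*
 * Probe: map the register block, read "dma-channels"/"dma-requests" from
 * the device tree, set up the physical and virtual channels, enable the
 * clock, and register both the dmaengine device and the OF translation
 * callback.
 */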
static int k3_dma_probe(struct platform_device *op)
{
        struct k3_dma_dev *d;
        const struct of_device_id *of_id;
        struct resource *iores;
        int i, ret, irq = 0;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
        if (of_id) {
                of_property_read_u32((&op->dev)->of_node,
                                "dma-channels", &d->dma_channels);
                of_property_read_u32((&op->dev)->of_node,
                                "dma-requests", &d->dma_requests);
        }

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, irq,
                        k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;

        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
        if (d->phy == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct k3_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
        d->slave.device_tx_status = k3_dma_tx_status;
        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
        d->slave.device_issue_pending = k3_dma_issue_pending;
        d->slave.device_config = k3_dma_config;
        d->slave.device_pause = k3_dma_transfer_pause;
        d->slave.device_resume = k3_dma_transfer_resume;
        d->slave.device_terminate_all = k3_dma_terminate_all;
        d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

        /* init virtual channel */
        d->chans = devm_kzalloc(&op->dev,
                d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
        if (d->chans == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct k3_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = k3_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        k3_dma_enable_dma(d, true);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                return ret;

        ret = of_dma_controller_register((&op->dev)->of_node,
                                        k3_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
        platform_set_drvdata(op, d);
        dev_info(&op->dev, "initialized\n");

        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
        return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
        struct k3_dma_chan *c, *cn;
        struct k3_dma_dev *d = platform_get_drvdata(op);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);

        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
        tasklet_kill(&d->task);
        clk_disable_unprepare(d->clk);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = k3_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                        "chan %d is running, fail to suspend\n", stat);
                return -1;
        }
        k3_dma_enable_dma(d, false);
        clk_disable_unprepare(d->clk);
        return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        k3_dma_enable_dma(d, true);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &k3_dma_pmops,
                .of_match_table = k3_pdma_dt_ids,
        },
        .probe          = k3_dma_probe,
        .remove         = k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");