linux/drivers/staging/mt7621-dma/ralink-gdma.c
/*
 *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
 *  GDMA4740 DMAC support
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under  the terms of the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the License, or (at your
 *  option) any later version.
 *
 */
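
/*
 * Overview
 *
 * This driver exposes the Ralink/MediaTek GDMA engine through the generic
 * dmaengine framework, built on top of the virt-dma helpers.  It supports
 * slave (device) transfers, cyclic transfers and memory-to-memory copies.
 * Two register layouts are handled, RT305x and RT3883/MT7621, selected via
 * the OF match data below.
 *
 * A consumer uses the standard dmaengine API.  As a rough, illustrative
 * sequence only (the request name "rx" and the slave config contents are
 * hypothetical and depend on the client device):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);	/- 4-byte bus width only -/
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM, 0);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */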

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define GDMA_REG_SRC_ADDR(x)            (0x00 + (x) * 0x10)
#define GDMA_REG_DST_ADDR(x)            (0x04 + (x) * 0x10)

#define GDMA_REG_CTRL0(x)               (0x08 + (x) * 0x10)
#define GDMA_REG_CTRL0_TX_MASK          0xffff
#define GDMA_REG_CTRL0_TX_SHIFT         16
#define GDMA_REG_CTRL0_CURR_MASK        0xff
#define GDMA_REG_CTRL0_CURR_SHIFT       8
#define GDMA_REG_CTRL0_SRC_ADDR_FIXED   BIT(7)
#define GDMA_REG_CTRL0_DST_ADDR_FIXED   BIT(6)
#define GDMA_REG_CTRL0_BURST_MASK       0x7
#define GDMA_REG_CTRL0_BURST_SHIFT      3
#define GDMA_REG_CTRL0_DONE_INT         BIT(2)
#define GDMA_REG_CTRL0_ENABLE           BIT(1)
#define GDMA_REG_CTRL0_SW_MODE          BIT(0)

#define GDMA_REG_CTRL1(x)               (0x0c + (x) * 0x10)
#define GDMA_REG_CTRL1_SEG_MASK         0xf
#define GDMA_REG_CTRL1_SEG_SHIFT        22
#define GDMA_REG_CTRL1_REQ_MASK         0x3f
#define GDMA_REG_CTRL1_SRC_REQ_SHIFT    16
#define GDMA_REG_CTRL1_DST_REQ_SHIFT    8
#define GDMA_REG_CTRL1_CONTINUOUS       BIT(14)
#define GDMA_REG_CTRL1_NEXT_MASK        0x1f
#define GDMA_REG_CTRL1_NEXT_SHIFT       3
#define GDMA_REG_CTRL1_COHERENT         BIT(2)
#define GDMA_REG_CTRL1_FAIL             BIT(1)
#define GDMA_REG_CTRL1_MASK             BIT(0)

#define GDMA_REG_UNMASK_INT             0x200
#define GDMA_REG_DONE_INT               0x204

#define GDMA_REG_GCT                    0x220
#define GDMA_REG_GCT_CHAN_MASK          0x3
#define GDMA_REG_GCT_CHAN_SHIFT         3
#define GDMA_REG_GCT_VER_MASK           0x3
#define GDMA_REG_GCT_VER_SHIFT          1
#define GDMA_REG_GCT_ARBIT_RR           BIT(0)

#define GDMA_REG_REQSTS                 0x2a0
#define GDMA_REG_ACKSTS                 0x2a4
#define GDMA_REG_FINSTS                 0x2a8

/* RT305X gdma registers */
#define GDMA_RT305X_CTRL0_REQ_MASK      0xf
#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT 12
#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT 8

#define GDMA_RT305X_CTRL1_FAIL          BIT(4)
#define GDMA_RT305X_CTRL1_NEXT_MASK     0x7
#define GDMA_RT305X_CTRL1_NEXT_SHIFT    1

#define GDMA_RT305X_STATUS_INT          0x80
#define GDMA_RT305X_STATUS_SIGNAL       0x84
#define GDMA_RT305X_GCT                 0x88

/* MT7621 gdma registers */
#define GDMA_REG_PERF_START(x)          (0x230 + (x) * 0x8)
#define GDMA_REG_PERF_END(x)            (0x234 + (x) * 0x8)

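/*
 * Each channel owns a bank of four 32-bit registers (source address,
 * destination address, CTRL0 and CTRL1) spaced 0x10 apart.  CTRL0 carries
 * the transfer length, burst size and the enable/interrupt bits; CTRL1
 * carries the request line numbers and a "next channel" field (programmed
 * with the channel's own id below).  The burst size field uses the
 * encoding in the enum that follows.
 */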
enum gdma_dma_transfer_size {
        GDMA_TRANSFER_SIZE_4BYTE        = 0,
        GDMA_TRANSFER_SIZE_8BYTE        = 1,
        GDMA_TRANSFER_SIZE_16BYTE       = 2,
        GDMA_TRANSFER_SIZE_32BYTE       = 3,
        GDMA_TRANSFER_SIZE_64BYTE       = 4,
};

struct gdma_dma_sg {
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        u32 len;
};

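/*
 * One software descriptor covers a whole dmaengine transaction.  It embeds
 * the virt-dma descriptor and carries the transfer direction, the remaining
 * byte count and a flexible array of hardware segments, each of which is
 * programmed into the channel registers one at a time.
 */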
struct gdma_dma_desc {
        struct virt_dma_desc vdesc;

        enum dma_transfer_direction direction;
        bool cyclic;

        u32 residue;
        unsigned int num_sgs;
        struct gdma_dma_sg sg[];
};

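/*
 * Per-channel state: the virt-dma channel, the peripheral request line
 * (slave_id), the device FIFO address and burst size taken from the slave
 * configuration, plus the descriptor currently in flight and the index of
 * the next segment to program.
 */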
struct gdma_dmaengine_chan {
        struct virt_dma_chan vchan;
        unsigned int id;
        unsigned int slave_id;

        dma_addr_t fifo_addr;
        enum gdma_dma_transfer_size burst_size;

        struct gdma_dma_desc *desc;
        unsigned int next_sg;
};

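/*
 * Controller state.  chan_issued is a bitmap of channels that have work
 * pending; it is consumed by the tasklet, which actually starts the
 * transfers.  cnt counts channels with a segment in flight and is used to
 * cap concurrency (see gdma_dma_tasklet()).
 */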
struct gdma_dma_dev {
        struct dma_device ddev;
        struct device_dma_parameters dma_parms;
        struct gdma_data *data;
        void __iomem *base;
        struct tasklet_struct task;
        volatile unsigned long chan_issued;
        atomic_t cnt;

        struct gdma_dmaengine_chan chan[];
};

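/*
 * Per-SoC description: number of channels, which register holds the done
 * interrupt status, and hooks for controller init and for programming one
 * segment on a channel.
 */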
struct gdma_data {
        int chancnt;
        u32 done_int_reg;
        void (*init)(struct gdma_dma_dev *dma_dev);
        int (*start_transfer)(struct gdma_dmaengine_chan *chan);
};

static struct gdma_dma_dev *gdma_dma_chan_get_dev(
        struct gdma_dmaengine_chan *chan)
{
        return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
                ddev);
}

static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
}

static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct gdma_dma_desc, vdesc);
}

static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
                                     unsigned int reg)
{
        return readl(dma_dev->base + reg);
}

static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
                                  unsigned int reg, uint32_t val)
{
        writel(val, dma_dev->base + reg);
}

static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
{
        return kzalloc(sizeof(struct gdma_dma_desc) +
                sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
}

static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
{
        if (maxburst < 2)
                return GDMA_TRANSFER_SIZE_4BYTE;
        else if (maxburst < 4)
                return GDMA_TRANSFER_SIZE_8BYTE;
        else if (maxburst < 8)
                return GDMA_TRANSFER_SIZE_16BYTE;
        else if (maxburst < 16)
                return GDMA_TRANSFER_SIZE_32BYTE;
        else
                return GDMA_TRANSFER_SIZE_64BYTE;
}

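/*
 * Slave configuration: the controller only handles a 4-byte bus width and
 * device flow control is not supported, so anything else is rejected.  The
 * FIFO address, request line and burst size are cached per channel and
 * applied when a segment is programmed.
 */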
static int gdma_dma_config(struct dma_chan *c,
                           struct dma_slave_config *config)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);

        if (config->device_fc) {
                dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
                return -EINVAL;
        }

        switch (config->direction) {
        case DMA_MEM_TO_DEV:
                if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
                        dev_err(dma_dev->ddev.dev, "only a 4-byte bus width is supported\n");
                        return -EINVAL;
                }
                chan->slave_id = config->slave_id;
                chan->fifo_addr = config->dst_addr;
                chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
                break;
        case DMA_DEV_TO_MEM:
                if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
                        dev_err(dma_dev->ddev.dev, "only a 4-byte bus width is supported\n");
                        return -EINVAL;
                }
                chan->slave_id = config->slave_id;
                chan->fifo_addr = config->src_addr;
                chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
                break;
        default:
                dev_err(dma_dev->ddev.dev, "invalid direction %d\n",
                        config->direction);
                return -EINVAL;
        }

        return 0;
}

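/*
 * terminate_all drops the current descriptor and everything still queued on
 * the virt-dma channel, then polls CTRL0 until the hardware clears the
 * enable bit, with a 5 second timeout after which CTRL0 is forced back to
 * its reset value.
 */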
static int gdma_dma_terminate_all(struct dma_chan *c)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
        unsigned long flags, timeout;
        LIST_HEAD(head);
        int i = 0;

        spin_lock_irqsave(&chan->vchan.lock, flags);
        chan->desc = NULL;
        clear_bit(chan->id, &dma_dev->chan_issued);
        vchan_get_all_descriptors(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&chan->vchan, &head);

        /* wait for the in-flight transfer to complete */
        timeout = jiffies + msecs_to_jiffies(5000);
        while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
                        GDMA_REG_CTRL0_ENABLE) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
                                chan->id);
                        /* restore the register to its reset value */
                        gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
                        break;
                }
                cpu_relax();
                i++;
        }

        if (i)
                dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
                        chan->id, i);

        return 0;
}

static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
        dev_dbg(dma_dev->ddev.dev,
                "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
                id,
                gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
                gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
                gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
                gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
                gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
                gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
}

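/*
 * Program one segment into a channel.  The source/destination addresses and
 * CTRL1 (request lines and the channel's "next" field) are written first;
 * CTRL0 is written last because it carries the enable bit, together with the
 * transfer length, burst size and done-interrupt enable.  The two SoC
 * variants mainly differ in where the request numbers live: CTRL0 on
 * RT305x, CTRL1 on RT3883/MT7621.
 */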
static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
        struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
        dma_addr_t src_addr, dst_addr;
        struct gdma_dma_sg *sg;
        uint32_t ctrl0, ctrl1;

        /* verify that the channel is stopped */
        ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
        if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
                dev_err(dma_dev->ddev.dev, "chan %d is still running (%08x)\n",
                        chan->id, ctrl0);
                rt305x_dump_reg(dma_dev, chan->id);
                return -EINVAL;
        }

        sg = &chan->desc->sg[chan->next_sg];
        if (chan->desc->direction == DMA_MEM_TO_DEV) {
                src_addr = sg->src_addr;
                dst_addr = chan->fifo_addr;
                ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
                        (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
                        (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
        } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
                src_addr = chan->fifo_addr;
                dst_addr = sg->dst_addr;
                ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
                        (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
                        (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
        } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
                /*
                 * TODO: the memcpy (software request) mode is buggy; when
                 * verified with dmatest it sometimes copies 8 bytes more
                 * than requested.
                 */
                src_addr = sg->src_addr;
                dst_addr = sg->dst_addr;
                ctrl0 = GDMA_REG_CTRL0_SW_MODE |
                        (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
                        (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
        } else {
                dev_err(dma_dev->ddev.dev, "invalid direction %d\n",
                        chan->desc->direction);
                return -EINVAL;
        }

        ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
                 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
                 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
        ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

        chan->next_sg++;
        gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
        gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
        gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

        /* make sure next_sg is updated before the channel is enabled */
        wmb();
        gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

        return 0;
}

static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
        dev_dbg(dma_dev->ddev.dev,
                "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
                id,
                gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
                gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
                gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
                gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
                gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
                gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
                gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
                gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
                gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
}

static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
        struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
        dma_addr_t src_addr, dst_addr;
        struct gdma_dma_sg *sg;
        uint32_t ctrl0, ctrl1;

        /* verify that the channel is stopped */
        ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
        if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
                dev_err(dma_dev->ddev.dev, "chan %d is still running (%08x)\n",
                        chan->id, ctrl0);
                rt3883_dump_reg(dma_dev, chan->id);
                return -EINVAL;
        }

        sg = &chan->desc->sg[chan->next_sg];
        if (chan->desc->direction == DMA_MEM_TO_DEV) {
                src_addr = sg->src_addr;
                dst_addr = chan->fifo_addr;
                ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
                ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
                        (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
        } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
                src_addr = chan->fifo_addr;
                dst_addr = sg->dst_addr;
                ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
                ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
                        (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
                        GDMA_REG_CTRL1_COHERENT;
        } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
                src_addr = sg->src_addr;
                dst_addr = sg->dst_addr;
                ctrl0 = GDMA_REG_CTRL0_SW_MODE;
                ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
                        (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
                        GDMA_REG_CTRL1_COHERENT;
        } else {
                dev_err(dma_dev->ddev.dev, "invalid direction %d\n",
                        chan->desc->direction);
                return -EINVAL;
        }

        ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
                 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
                 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
        ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

        chan->next_sg++;
        gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
        gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
        gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

        /* make sure next_sg is updated before the channel is enabled */
        wmb();
        gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

        return 0;
}

static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
                                      struct gdma_dmaengine_chan *chan)
{
        return dma_dev->data->start_transfer(chan);
}

static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
{
        struct virt_dma_desc *vdesc;

        vdesc = vchan_next_desc(&chan->vchan);
        if (!vdesc) {
                chan->desc = NULL;
                return 0;
        }
        chan->desc = to_gdma_dma_desc(vdesc);
        chan->next_sg = 0;

        return 1;
}

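/*
 * Per-channel completion handling, called from the interrupt handler.  For
 * cyclic descriptors the period callback is invoked and the segment index
 * simply wraps around; for the others the residue is reduced by the segment
 * that just finished, and the descriptor is completed once its last segment
 * is done.  Channels that still have work are flagged in chan_issued so the
 * tasklet can restart them.
 */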
static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
                              struct gdma_dmaengine_chan *chan)
{
        struct gdma_dma_desc *desc;
        unsigned long flags;
        int chan_issued;

        chan_issued = 0;
        spin_lock_irqsave(&chan->vchan.lock, flags);
        desc = chan->desc;
        if (desc) {
                if (desc->cyclic) {
                        vchan_cyclic_callback(&desc->vdesc);
                        if (chan->next_sg == desc->num_sgs)
                                chan->next_sg = 0;
                        chan_issued = 1;
                } else {
                        desc->residue -= desc->sg[chan->next_sg - 1].len;
                        if (chan->next_sg == desc->num_sgs) {
                                list_del(&desc->vdesc.node);
                                vchan_cookie_complete(&desc->vdesc);
                                chan_issued = gdma_next_desc(chan);
                        } else {
                                chan_issued = 1;
                        }
                }
        } else {
                dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
                        chan->id);
        }
        if (chan_issued)
                set_bit(chan->id, &dma_dev->chan_issued);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

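/*
 * Top-level interrupt handler: read the done status register, acknowledge
 * all set bits, and run the per-channel completion for each one.  The
 * tasklet is scheduled afterwards if any channel still has pending work.
 */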
static irqreturn_t gdma_dma_irq(int irq, void *devid)
{
        struct gdma_dma_dev *dma_dev = devid;
        u32 done, done_reg;
        unsigned int i;

        done_reg = dma_dev->data->done_int_reg;
        done = gdma_dma_read(dma_dev, done_reg);
        if (unlikely(!done))
                return IRQ_NONE;

        /* acknowledge the done bits */
        gdma_dma_write(dma_dev, done_reg, done);

        i = 0;
        while (done) {
                if (done & 0x1) {
                        gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
                        atomic_dec(&dma_dev->cnt);
                }
                done >>= 1;
                i++;
        }

        /* only schedule the tasklet if there is work to do */
        if (dma_dev->chan_issued)
                tasklet_schedule(&dma_dev->task);

        return IRQ_HANDLED;
}

static void gdma_dma_issue_pending(struct dma_chan *c)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
                if (gdma_next_desc(chan)) {
                        set_bit(chan->id, &dma_dev->chan_issued);
                        tasklet_schedule(&dma_dev->task);
                } else {
                        dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
                                chan->id);
                }
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

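/*
 * Descriptor preparation.  Each prep callback builds one software
 * descriptor with one hardware segment per scatterlist entry (or per
 * period/chunk), since a single CTRL0 programming can move at most
 * GDMA_REG_CTRL0_TX_MASK bytes.
 */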
static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
                struct dma_chan *c, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct gdma_dma_desc *desc;
        struct scatterlist *sg;
        unsigned int i;

        desc = gdma_dma_alloc_desc(sg_len);
        if (!desc) {
                dev_err(c->device->dev, "failed to allocate sg descriptor\n");
                return NULL;
        }
        desc->residue = 0;

        for_each_sg(sgl, sg, sg_len, i) {
                if (direction == DMA_MEM_TO_DEV) {
                        desc->sg[i].src_addr = sg_dma_address(sg);
                } else if (direction == DMA_DEV_TO_MEM) {
                        desc->sg[i].dst_addr = sg_dma_address(sg);
                } else {
                        dev_err(c->device->dev, "invalid direction %d\n",
                                direction);
                        goto free_desc;
                }

                if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
                        dev_err(c->device->dev, "sg len too large %u\n",
                                sg_dma_len(sg));
                        goto free_desc;
                }
                desc->sg[i].len = sg_dma_len(sg);
                desc->residue += sg_dma_len(sg);
        }

        desc->num_sgs = sg_len;
        desc->direction = direction;
        desc->cyclic = false;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
        kfree(desc);
        return NULL;
}

static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
                struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct gdma_dma_desc *desc;
        unsigned int num_periods, i;
        size_t xfer_count;

        if (!len)
                return NULL;

        chan->burst_size = gdma_dma_maxburst(len >> 2);

        xfer_count = GDMA_REG_CTRL0_TX_MASK;
        num_periods = DIV_ROUND_UP(len, xfer_count);

        desc = gdma_dma_alloc_desc(num_periods);
        if (!desc) {
                dev_err(c->device->dev, "failed to allocate memcpy descriptor\n");
                return NULL;
        }
        desc->residue = len;

        for (i = 0; i < num_periods; i++) {
                desc->sg[i].src_addr = src;
                desc->sg[i].dst_addr = dest;
                if (len > xfer_count)
                        desc->sg[i].len = xfer_count;
                else
                        desc->sg[i].len = len;
                src += desc->sg[i].len;
                dest += desc->sg[i].len;
                len -= desc->sg[i].len;
        }

        desc->num_sgs = num_periods;
        desc->direction = DMA_MEM_TO_MEM;
        desc->cyclic = false;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
        struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct gdma_dma_desc *desc;
        unsigned int num_periods, i;

        if (buf_len % period_len)
                return NULL;

        if (period_len > GDMA_REG_CTRL0_TX_MASK) {
                dev_err(c->device->dev, "cyclic len too large %zu\n",
                        period_len);
                return NULL;
        }

        num_periods = buf_len / period_len;
        desc = gdma_dma_alloc_desc(num_periods);
        if (!desc) {
                dev_err(c->device->dev, "failed to allocate cyclic descriptor\n");
                return NULL;
        }
        desc->residue = buf_len;

        for (i = 0; i < num_periods; i++) {
                if (direction == DMA_MEM_TO_DEV) {
                        desc->sg[i].src_addr = buf_addr;
                } else if (direction == DMA_DEV_TO_MEM) {
                        desc->sg[i].dst_addr = buf_addr;
                } else {
                        dev_err(c->device->dev, "invalid direction %d\n",
                                direction);
                        goto free_desc;
                }
                desc->sg[i].len = period_len;
                buf_addr += period_len;
        }

        desc->num_sgs = num_periods;
        desc->direction = direction;
        desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
        kfree(desc);
        return NULL;
}

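/*
 * Residue reporting: for the descriptor currently in flight the residue is
 * tracked per completed segment (or, for cyclic transfers, derived from the
 * current position within the ring); for descriptors still sitting in the
 * queue the full length is reported.
 */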
static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *state)
{
        struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;
        struct gdma_dma_desc *desc;

        status = dma_cookie_status(c, cookie, state);
        if (status == DMA_COMPLETE || !state)
                return status;

        spin_lock_irqsave(&chan->vchan.lock, flags);
        desc = chan->desc;
        if (desc && (cookie == desc->vdesc.tx.cookie)) {
                /*
                 * We never update desc->residue in the cyclic case, so
                 * compute the remaining room to the end of the circular
                 * buffer from the current segment index instead.
                 */
                if (desc->cyclic)
                        state->residue = desc->residue -
                                ((chan->next_sg - 1) * desc->sg[0].len);
                else
                        state->residue = desc->residue;
        } else {
                vdesc = vchan_find_desc(&chan->vchan, cookie);
                if (vdesc)
                        state->residue = to_gdma_dma_desc(vdesc)->residue;
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        dev_dbg(c->device->dev, "tx residue %u bytes\n", state->residue);

        return status;
}

static void gdma_dma_free_chan_resources(struct dma_chan *c)
{
        vchan_free_chan_resources(to_virt_chan(c));
}

static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
}

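/*
 * Scheduling tasklet: walk the chan_issued bitmap round-robin, starting
 * from the channel after the one served last, and program the next segment
 * for every flagged channel.  Concurrency is capped at two active channels
 * (see the comment in the loop).
 */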
static void gdma_dma_tasklet(unsigned long arg)
{
        struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
        struct gdma_dmaengine_chan *chan;
        static unsigned int last_chan;
        unsigned int i, chan_mask;

        /* record the last served channel to round-robin all channels */
        i = last_chan;
        chan_mask = dma_dev->data->chancnt - 1;
        do {
                /*
                 * On MT7621, dmatest verification fails with data
                 * corruption when all channels are enabled, so limit the
                 * number of channels working at the same time to two.
                 */
                if (atomic_read(&dma_dev->cnt) >= 2) {
                        last_chan = i;
                        break;
                }

                if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
                        chan = &dma_dev->chan[i];
                        if (chan->desc) {
                                atomic_inc(&dma_dev->cnt);
                                gdma_start_transfer(dma_dev, chan);
                        } else {
                                dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
                                        chan->id);
                        }

                        if (!dma_dev->chan_issued)
                                break;
                }

                i = (i + 1) & chan_mask;
        } while (i != last_chan);
}

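/*
 * Controller init: enable round-robin arbitration between channels and log
 * the hardware revision and channel count read back from the global control
 * register.  The two variants only differ in the register offset.
 */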
static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
{
        uint32_t gct;

        /* all chans round robin */
        gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);

        gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
        dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
                 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
                 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
                        GDMA_REG_GCT_CHAN_MASK));
}

static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
{
        uint32_t gct;

        /* all chans round robin */
        gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);

        gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
        dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
                 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
                 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
                        GDMA_REG_GCT_CHAN_MASK));
}

static struct gdma_data rt305x_gdma_data = {
        .chancnt = 8,
        .done_int_reg = GDMA_RT305X_STATUS_INT,
        .init = rt305x_gdma_init,
        .start_transfer = rt305x_gdma_start_transfer,
};

static struct gdma_data rt3883_gdma_data = {
        .chancnt = 16,
        .done_int_reg = GDMA_REG_DONE_INT,
        .init = rt3883_gdma_init,
        .start_transfer = rt3883_gdma_start_transfer,
};

static const struct of_device_id gdma_of_match_table[] = {
        { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
        { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
        { },
};

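/*
 * Probe: map the registers, hook up the interrupt, reset the block, then
 * describe the capabilities (memcpy, slave, cyclic; 4-byte bus width) to
 * the dmaengine core, initialise one virt-dma channel per hardware channel
 * and finally register both the dmaengine device and the OF DMA translator.
 */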
static int gdma_dma_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        struct gdma_dmaengine_chan *chan;
        struct gdma_dma_dev *dma_dev;
        struct dma_device *dd;
        unsigned int i;
        struct resource *res;
        int ret;
        int irq;
        void __iomem *base;
        struct gdma_data *data;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        match = of_match_device(gdma_of_match_table, &pdev->dev);
        if (!match)
                return -EINVAL;
        data = (struct gdma_data *)match->data;

        dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
                        (sizeof(struct gdma_dmaengine_chan) * data->chancnt),
                        GFP_KERNEL);
        if (!dma_dev) {
                dev_err(&pdev->dev, "alloc dma device failed\n");
                return -ENOMEM;
        }
        dma_dev->data = data;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
        dma_dev->base = base;
        tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq\n");
                return irq;
        }
        ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
                               0, dev_name(&pdev->dev), dma_dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return ret;
        }

        device_reset(&pdev->dev);

        dd = &dma_dev->ddev;
        dma_cap_set(DMA_MEMCPY, dd->cap_mask);
        dma_cap_set(DMA_SLAVE, dd->cap_mask);
        dma_cap_set(DMA_CYCLIC, dd->cap_mask);
        dd->device_free_chan_resources = gdma_dma_free_chan_resources;
        dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
        dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
        dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
        dd->device_config = gdma_dma_config;
        dd->device_terminate_all = gdma_dma_terminate_all;
        dd->device_tx_status = gdma_dma_tx_status;
        dd->device_issue_pending = gdma_dma_issue_pending;

        dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

        dd->dev = &pdev->dev;
        dd->dev->dma_parms = &dma_dev->dma_parms;
        dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
        INIT_LIST_HEAD(&dd->channels);

        for (i = 0; i < data->chancnt; i++) {
                chan = &dma_dev->chan[i];
                chan->id = i;
                chan->vchan.desc_free = gdma_dma_desc_free;
                vchan_init(&chan->vchan, dd);
        }

        /* init hardware */
        data->init(dma_dev);

        ret = dma_async_device_register(dd);
        if (ret) {
                dev_err(&pdev->dev, "failed to register dma device\n");
                return ret;
        }

        ret = of_dma_controller_register(pdev->dev.of_node,
                                         of_dma_xlate_by_chan_id, dma_dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register of dma controller\n");
                goto err_unregister;
        }

        platform_set_drvdata(pdev, dma_dev);

        return 0;

err_unregister:
        dma_async_device_unregister(dd);
        return ret;
}

static int gdma_dma_remove(struct platform_device *pdev)
{
        struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);

        tasklet_kill(&dma_dev->task);
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&dma_dev->ddev);

        return 0;
}

static struct platform_driver gdma_dma_driver = {
        .probe = gdma_dma_probe,
        .remove = gdma_dma_remove,
        .driver = {
                .name = "gdma-rt2880",
                .of_match_table = gdma_of_match_table,
        },
};
module_platform_driver(gdma_dma_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");