linux/drivers/net/ethernet/ti/davinci_cpdma.c
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

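/*
 * Typical client usage, sketched (the handler names below are
 * illustrative, not part of this API): fill in a struct cpdma_params,
 * then
 *
 *	ctlr = cpdma_ctlr_create(&params);
 *	tx   = cpdma_chan_create(ctlr, 0, tx_handler, 0);
 *	rx   = cpdma_chan_create(ctlr, 0, rx_handler, 1);
 *	cpdma_ctlr_start(ctlr);
 *	cpdma_chan_submit(tx, token, data, len, 0);
 *
 * with cpdma_chan_process() called from the interrupt or NAPI poll
 * path, and cpdma_ctlr_stop()/cpdma_ctlr_destroy() on teardown.
 */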
/* DMA Registers */
#define CPDMA_TXIDVER           0x00
#define CPDMA_TXCONTROL         0x04
#define CPDMA_TXTEARDOWN        0x08
#define CPDMA_RXIDVER           0x10
#define CPDMA_RXCONTROL         0x14
#define CPDMA_RXTEARDOWN        0x18
#define CPDMA_SOFTRESET         0x1c
#define CPDMA_TXINTSTATRAW      0x80
#define CPDMA_TXINTSTATMASKED   0x84
#define CPDMA_TXINTMASKSET      0x88
#define CPDMA_TXINTMASKCLEAR    0x8c
#define CPDMA_MACINVECTOR       0x90
#define CPDMA_MACEOIVECTOR      0x94
#define CPDMA_RXINTSTATRAW      0xa0
#define CPDMA_RXINTSTATMASKED   0xa4
#define CPDMA_RXINTMASKSET      0xa8
#define CPDMA_RXINTMASKCLEAR    0xac
#define CPDMA_DMAINTSTATRAW     0xb0
#define CPDMA_DMAINTSTATMASKED  0xb4
#define CPDMA_DMAINTMASKSET     0xb8
#define CPDMA_DMAINTMASKCLEAR   0xbc
#define CPDMA_DMAINT_HOSTERR    BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL        0x20
#define CPDMA_DMASTATUS         0x24
#define CPDMA_RXBUFFOFS         0x28
#define CPDMA_EM_CONTROL        0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP          BIT(31)
#define CPDMA_DESC_EOP          BIT(30)
#define CPDMA_DESC_OWNER        BIT(29)
#define CPDMA_DESC_EOQ          BIT(28)
#define CPDMA_DESC_TD_COMPLETE  BIT(27)
#define CPDMA_DESC_PASS_CRC     BIT(26)
#define CPDMA_DESC_TO_PORT_EN   BIT(20)
#define CPDMA_TO_PORT_SHIFT     16
#define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN      4

#define CPDMA_TEARDOWN_VALUE    0xfffffffc

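/*
 * Hardware descriptor layout.  The DMA engine reads only the four hw_*
 * words: hw_mode carries the SOP/EOP/OWNER/EOQ flags in its upper bits
 * and the packet length in its low 11 bits (see the 0x7ff mask in
 * __cpdma_chan_process()).  The trailing sw_* words are host-side
 * bookkeeping that the hardware never looks at.
 */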
struct cpdma_desc {
        /* hardware fields */
        u32                     hw_next;
        u32                     hw_buffer;
        u32                     hw_len;
        u32                     hw_mode;
        /* software fields */
        void                    *sw_token;
        u32                     sw_buffer;
        u32                     sw_len;
};

struct cpdma_desc_pool {
        phys_addr_t             phys;
        dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
        int                     num_desc;
        struct device           *dev;
        struct gen_pool         *gen_pool;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
        enum cpdma_state        state;
        struct cpdma_params     params;
        struct device           *dev;
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
        int                     chan_num;
};

struct cpdma_chan {
        struct cpdma_desc __iomem       *head, *tail;
        void __iomem                    *hdp, *cp, *rxfree;
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
        int                             count;
        u32                             desc_num;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
        struct cpdma_chan_stats         stats;
        /* offsets into dmaregs */
        int     int_set, int_clear, td;
};

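/*
 * Channel bookkeeping: tx channels occupy slots [0, CPDMA_MAX_CHANNELS)
 * of ctlr->channels[], rx channels the next CPDMA_MAX_CHANNELS slots.
 * chan_linear() recovers the hardware channel index from either kind.
 */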
#define tx_chan_num(chan)       (chan)
#define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)        (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)       __chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs         params.dmaregs
#define num_chan        params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)         __raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            __raw_readl((chan)->fld)
#define desc_read(desc, fld)            __raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     __raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        __raw_writel((u32)(v), &(desc)->fld)

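/*
 * For directed packets on a tx channel, fold the destination port (1 or
 * 2) into the descriptor mode word so the packet is steered straight to
 * that switch port rather than going through the normal address lookup.
 */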
#define cpdma_desc_to_port(chan, mode, directed)                        \
        do {                                                            \
                if (!is_rx_chan(chan) && ((directed == 1) ||            \
                                          (directed == 2)))             \
                        mode |= (CPDMA_DESC_TO_PORT_EN |                \
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
        if (!pool)
                return;

        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             "cpdma_desc_pool size %zd != avail %zd",
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumap)
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
        else
                iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
                       int size, int align)
{
        struct cpdma_desc_pool *pool;
        int ret;

        pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;

        pool->dev       = dev;
        pool->mem_size  = size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
        pool->num_desc  = size / pool->desc_size;

        pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
                                              "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                dev_err(dev, "pool create failed %ld\n",
                        PTR_ERR(pool->gen_pool));
                goto gen_pool_create_fail;
        }

        if (phys) {
                pool->phys  = phys;
                pool->iomap = ioremap(phys, size); /* should be memremap? */
                pool->hw_addr = hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
                                                  GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return pool;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
        return NULL;
}

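/*
 * Translate between the addresses the DMA engine sees (bus addresses
 * relative to hw_addr) and the CPU-side mapping of the same descriptor
 * memory (iomap).  Both directions are plain offset arithmetic.
 */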
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                                   struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;
        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        return (struct cpdma_desc __iomem *)
                gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        ctlr->chan_num = 0;
        spin_lock_init(&ctlr->lock);

        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
                                            ctlr->params.desc_mem_phys,
                                            ctlr->params.desc_hw_addr,
                                            ctlr->params.desc_mem_size,
                                            ctlr->params.desc_align);
        if (!ctlr->pool)
                return NULL;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

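/*
 * Bring the engine up: soft-reset it when the hardware supports that,
 * zero every channel's head and completion pointers, mask all channel
 * interrupts (each channel re-enables its own as it starts), enable the
 * rx and tx engines, then restart any channels created before this call.
 */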
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;
        spin_unlock_irqrestore(&ctlr->lock, flags);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        spin_lock_irqsave(&ctlr->lock, flags);
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr->pool);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

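/*
 * Write the end-of-interrupt vector; the hardware will not raise the
 * corresponding interrupt again until it has been acknowledged here.
 */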
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

/**
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock.
 */
static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
        struct cpdma_desc_pool *pool = ctlr->pool;
        struct cpdma_chan *chan;
        int ch_desc_num;
        int i;

        if (!ctlr->chan_num)
                return;

        /* calculate average size of pool slice */
        ch_desc_num = pool->num_desc / ctlr->chan_num;

        /* split ctlr pool */
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (chan)
                        chan->desc_num = ch_desc_num;
        }
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler, int rx_type)
{
        int offset = chan_num * 4;
        struct cpdma_chan *chan;
        unsigned long flags;

        chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->desc_num  = ctlr->pool->num_desc / 2;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        ctlr->chan_num++;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
        unsigned long flags;
        int desc_num;

        spin_lock_irqsave(&chan->lock, flags);
        desc_num = chan->desc_num;
        spin_unlock_irqrestore(&chan->lock, flags);

        return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        ctlr->chan_num--;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

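/*
 * Chain a new descriptor onto a channel.  If the channel is idle, simply
 * point the head descriptor pointer at it.  Otherwise link it behind the
 * current tail; if the engine already hit end-of-queue on the old tail
 * (EOQ set with OWNER clear), it stopped before seeing the new link, so
 * kick it by rewriting hdp -- the "misqueue" case counted in the stats.
 */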
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *prev = chan->tail;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        u32                             mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}

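/*
 * Queue one buffer for transfer.  Short packets are padded up to
 * min_packet_size (counted as runt transmit buffers), the buffer is
 * DMA-mapped, and a descriptor is built with SOP, EOP and OWNER set
 * (plus port steering for directed packets) before being chained onto
 * the channel.  For rx channels the rxfree register is bumped so the
 * hardware knows another buffer is available.
 */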
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        unsigned long                   flags;
        u32                             mode;
        int                             ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        if (chan->count >= chan->desc_num) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        ret = dma_mapping_error(ctlr->dev, buffer);
        if (ret) {
                cpdma_desc_free(ctlr->pool, desc, 1);
                ret = -EINVAL;
                goto unlock_ret;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);

        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len,    len);
        desc_write(desc, hw_mode,   mode | len);
        desc_write(desc, sw_token,  token);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len,    len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        bool                    free_tx_desc;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                         gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      buff_dma;
        int                             origlen;
        void                            *token;

        token      = (void *)desc_read(desc, sw_token);
        buff_dma   = desc_read(desc, sw_buffer);
        origlen    = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)(token, outlen, status);
}

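/*
 * Retire the descriptor at the head of the channel, provided the
 * hardware is done with it (ownership has passed back to the host).
 * Writing the completed descriptor's address to cp acknowledges the
 * completion, and an EOQ flag means the engine stopped here, so the
 * next descriptor (if any) is re-queued by rewriting hdp.
 */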
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        int                             status, outlen;
        int                             cb_status = 0;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        unsigned long                   flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status  = __raw_readl(&desc->hw_mode);
        outlen  = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                            CPDMA_DESC_PORT_MASK);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if (status & CPDMA_DESC_EOQ) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

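/*
 * Tear a channel down: request teardown by writing the channel index to
 * the teardown register, poll cp until the hardware reports the
 * teardown-complete marker (0xfffffffc), acknowledge it, drain whatever
 * completed in the meantime, then hand every remaining descriptor back
 * to its owner with -ENOSYS so buffers are not leaked.
 */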
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;
        int                     ret;
        unsigned                timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);

                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

struct cpdma_control_info {
        u32             reg;
        u32             shift, mask;
        int             access;
#define ACCESS_RO       BIT(0)
#define ACCESS_WO       BIT(1)
#define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
};

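/*
 * Map each abstract CPDMA_* control to the extended register that backs
 * it, the bit position and width of the field, and whether the field
 * may be read, written, or both.
 */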
static struct cpdma_control_info controls[] = {
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        struct cpdma_control_info *info;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;
        info = &controls[control];

        ret = -EPERM;
        if ((info->access & ACCESS_RO) != ACCESS_RO)
                goto unlock_ret;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        struct cpdma_control_info *info;
        int ret;
        u32 val;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;
        info = &controls[control];

        ret = -EPERM;
        if ((info->access & ACCESS_WO) != ACCESS_WO)
                goto unlock_ret;

        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);
        ret = 0;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");