linux/drivers/net/davinci_cpdma.c
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER           0x00
#define CPDMA_TXCONTROL         0x04
#define CPDMA_TXTEARDOWN        0x08
#define CPDMA_RXIDVER           0x10
#define CPDMA_RXCONTROL         0x14
#define CPDMA_SOFTRESET         0x1c
#define CPDMA_RXTEARDOWN        0x18
#define CPDMA_TXINTSTATRAW      0x80
#define CPDMA_TXINTSTATMASKED   0x84
#define CPDMA_TXINTMASKSET      0x88
#define CPDMA_TXINTMASKCLEAR    0x8c
#define CPDMA_MACINVECTOR       0x90
#define CPDMA_MACEOIVECTOR      0x94
#define CPDMA_RXINTSTATRAW      0xa0
#define CPDMA_RXINTSTATMASKED   0xa4
#define CPDMA_RXINTMASKSET      0xa8
#define CPDMA_RXINTMASKCLEAR    0xac
#define CPDMA_DMAINTSTATRAW     0xb0
#define CPDMA_DMAINTSTATMASKED  0xb4
#define CPDMA_DMAINTMASKSET     0xb8
#define CPDMA_DMAINTMASKCLEAR   0xbc
#define CPDMA_DMAINT_HOSTERR    BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL        0x20
#define CPDMA_DMASTATUS         0x24
#define CPDMA_RXBUFFOFS         0x28
#define CPDMA_EM_CONTROL        0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP          BIT(31)
#define CPDMA_DESC_EOP          BIT(30)
#define CPDMA_DESC_OWNER        BIT(29)
#define CPDMA_DESC_EOQ          BIT(28)
#define CPDMA_DESC_TD_COMPLETE  BIT(27)
#define CPDMA_DESC_PASS_CRC     BIT(26)

#define CPDMA_TEARDOWN_VALUE    0xfffffffc

struct cpdma_desc {
        /* hardware fields */
        u32                     hw_next;
        u32                     hw_buffer;
        u32                     hw_len;
        u32                     hw_mode;
        /* software fields */
        void                    *sw_token;
        u32                     sw_buffer;
        u32                     sw_len;
};

struct cpdma_desc_pool {
        u32                     phys;
        u32                     hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
        int                     num_desc, used_desc;
        unsigned long           *bitmap;
        struct device           *dev;
        spinlock_t              lock;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
        enum cpdma_state        state;
        struct cpdma_params     params;
        struct device           *dev;
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
        struct cpdma_desc __iomem       *head, *tail;
        int                             count;
        void __iomem                    *hdp, *cp, *rxfree;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
        struct cpdma_chan_stats         stats;
        /* offsets into dmaregs */
        int     int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs         params.dmaregs
#define num_chan        params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)         __raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            __raw_readl((chan)->fld)
#define desc_read(desc, fld)            __raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     __raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        __raw_writel((u32)(v), &(desc)->fld)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
                                int size, int align)
{
        int bitmap_size;
        struct cpdma_desc_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        spin_lock_init(&pool->lock);

        pool->dev       = dev;
        pool->mem_size  = size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
        pool->num_desc  = size / pool->desc_size;
        /* round up so descriptors beyond the last full word are covered */
        bitmap_size  = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
        pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!pool->bitmap)
                goto fail;

        if (phys) {
                pool->phys  = phys;
                pool->iomap = ioremap(phys, size);
                pool->hw_addr = hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
                                                  GFP_KERNEL);
                pool->iomap = pool->cpumap;
                pool->hw_addr = pool->phys;
        }

        if (pool->iomap)
                return pool;

fail:
        kfree(pool->bitmap);
        kfree(pool);
        return NULL;
}

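/*
 * Tear down a descriptor pool.  Warns if any descriptors are still
 * outstanding, then releases the backing memory: iounmap() for pools
 * layered over dedicated on-chip RAM, dma_free_coherent() for pools
 * carved out of ordinary memory.
 */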
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
        unsigned long flags;

        if (!pool)
                return;

        spin_lock_irqsave(&pool->lock, flags);
        WARN_ON(pool->used_desc);
        kfree(pool->bitmap);
        if (pool->cpumap) {
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
        } else {
                iounmap(pool->iomap);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
        kfree(pool);
}

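/*
 * Convert between the cpu-visible (ioremapped or coherent) address of a
 * descriptor and the dma address that the hardware expects in hw_next
 * and in the head descriptor pointer registers.
 */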
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                  struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;
        return pool->hw_addr + (__force dma_addr_t)desc -
                            (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

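/*
 * Descriptor allocation is a simple first-fit search of the pool bitmap.
 * cpdma_desc_alloc() returns NULL when no run of num_desc free slots is
 * available; cpdma_desc_free() clears the corresponding bits again.
 */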
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
        unsigned long flags;
        int index;
        struct cpdma_desc __iomem *desc = NULL;

        spin_lock_irqsave(&pool->lock, flags);

        index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
                                           num_desc, 0);
        if (index < pool->num_desc) {
                bitmap_set(pool->bitmap, index, num_desc);
                desc = pool->iomap + pool->desc_size * index;
                pool->used_desc++;
        }

        spin_unlock_irqrestore(&pool->lock, flags);
        return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        unsigned long flags, index;

        index = ((unsigned long)desc - (unsigned long)pool->iomap) /
                pool->desc_size;
        spin_lock_irqsave(&pool->lock, flags);
        bitmap_clear(pool->bitmap, index, num_desc);
        pool->used_desc--;
        spin_unlock_irqrestore(&pool->lock, flags);
}

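/*
 * Allocate and initialize a controller instance.  The caller's params are
 * copied into the controller, and a descriptor pool is created from either
 * the dedicated descriptor memory (desc_mem_phys) or from coherent memory.
 */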
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        spin_lock_init(&ctlr->lock);

        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
                                            ctlr->params.desc_mem_phys,
                                            ctlr->params.desc_hw_addr,
                                            ctlr->params.desc_mem_size,
                                            ctlr->params.desc_align);
        if (!ctlr->pool) {
                kfree(ctlr);
                return NULL;
        }

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}

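/*
 * Bring the controller out of idle: optionally soft-reset the engine,
 * zero all head descriptor and completion pointers, enable the tx and rx
 * DMA engines, and restart any channels that were created beforehand.
 */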
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned long timeout = jiffies + HZ/10;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (time_before(jiffies, timeout)) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                }
                WARN_ON(!time_before(jiffies, timeout));
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

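/*
 * Stop the controller: tear down every channel, mask all interrupts and
 * disable the tx/rx DMA engines.  Returns -EINVAL if the controller is
 * not currently active.
 */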
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
        struct device *dev = ctlr->dev;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);

        dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

        dev_info(dev, "CPDMA: txidver: %x",
                 dma_reg_read(ctlr, CPDMA_TXIDVER));
        dev_info(dev, "CPDMA: txcontrol: %x",
                 dma_reg_read(ctlr, CPDMA_TXCONTROL));
        dev_info(dev, "CPDMA: txteardown: %x",
                 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
        dev_info(dev, "CPDMA: rxidver: %x",
                 dma_reg_read(ctlr, CPDMA_RXIDVER));
        dev_info(dev, "CPDMA: rxcontrol: %x",
                 dma_reg_read(ctlr, CPDMA_RXCONTROL));
        dev_info(dev, "CPDMA: softreset: %x",
                 dma_reg_read(ctlr, CPDMA_SOFTRESET));
        dev_info(dev, "CPDMA: rxteardown: %x",
                 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
        dev_info(dev, "CPDMA: txintstatraw: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
        dev_info(dev, "CPDMA: txintstatmasked: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
        dev_info(dev, "CPDMA: txintmaskset: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
        dev_info(dev, "CPDMA: txintmaskclear: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
        dev_info(dev, "CPDMA: macinvector: %x",
                 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
        dev_info(dev, "CPDMA: maceoivector: %x",
                 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
        dev_info(dev, "CPDMA: rxintstatraw: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
        dev_info(dev, "CPDMA: rxintstatmasked: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
        dev_info(dev, "CPDMA: rxintmaskset: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
        dev_info(dev, "CPDMA: rxintmaskclear: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
        dev_info(dev, "CPDMA: dmaintstatraw: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
        dev_info(dev, "CPDMA: dmaintstatmasked: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
        dev_info(dev, "CPDMA: dmaintmaskset: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
        dev_info(dev, "CPDMA: dmaintmaskclear: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

        /* these registers exist only when has_ext_regs is set */
        if (ctlr->params.has_ext_regs) {
                dev_info(dev, "CPDMA: dmacontrol: %x",
                         dma_reg_read(ctlr, CPDMA_DMACONTROL));
                dev_info(dev, "CPDMA: dmastatus: %x",
                         dma_reg_read(ctlr, CPDMA_DMASTATUS));
                dev_info(dev, "CPDMA: rxbuffofs: %x",
                         dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
        }

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                if (ctlr->channels[i])
                        cpdma_chan_dump(ctlr->channels[i]);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

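/*
 * Free a controller and everything attached to it.  The controller is
 * stopped if necessary and all channels are destroyed first.  Note that
 * cpdma_ctlr_stop() and cpdma_chan_destroy() take ctlr->lock themselves,
 * so no lock is held across these calls here.
 */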
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_destroy(ctlr->channels[i]);
        }

        cpdma_desc_pool_destroy(ctlr->pool);
        kfree(ctlr);
        return ret;
}

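/*
 * Enable or disable the host-error interrupt at the controller level and
 * the completion interrupts of every allocated channel.  cpdma_ctlr_eoi()
 * signals end-of-interrupt via the MAC interrupt vector register.
 */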
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}

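/*
 * Create a channel.  Whether a channel number is rx or tx, and its linear
 * per-direction index, are decided by is_rx_chan()/chan_linear() from
 * davinci_cpdma.h.  The index picks this channel's slot in the hdp/cp
 * (and, for rx, rxfree) register banks and its interrupt mask bit.
 */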
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler)
{
        struct cpdma_chan *chan;
        int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
        unsigned long flags;

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;

        ret = -ENOMEM;
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                goto err_chan_alloc;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = -EBUSY;
        if (ctlr->channels[chan_num])
                goto err_chan_busy;

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;

err_chan_busy:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        kfree(chan);
err_chan_alloc:
        return ERR_PTR(ret);
}

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        kfree(chan);
        return 0;
}

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;
        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
        unsigned long flags;
        struct device *dev = chan->ctlr->dev;

        spin_lock_irqsave(&chan->lock, flags);

        dev_info(dev, "channel %d (%s %d) state %s",
                 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
                 chan_linear(chan), cpdma_state_str[chan->state]);
        dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
        dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
        if (chan->rxfree) {
                dev_info(dev, "\trxfree: %x\n",
                         chan_read(chan, rxfree));
        }

        dev_info(dev, "\tstats head_enqueue: %d\n",
                 chan->stats.head_enqueue);
        dev_info(dev, "\tstats tail_enqueue: %d\n",
                 chan->stats.tail_enqueue);
        dev_info(dev, "\tstats pad_enqueue: %d\n",
                 chan->stats.pad_enqueue);
        dev_info(dev, "\tstats misqueued: %d\n",
                 chan->stats.misqueued);
        dev_info(dev, "\tstats desc_alloc_fail: %d\n",
                 chan->stats.desc_alloc_fail);
        dev_info(dev, "\tstats pad_alloc_fail: %d\n",
                 chan->stats.pad_alloc_fail);
        dev_info(dev, "\tstats runt_receive_buff: %d\n",
                 chan->stats.runt_receive_buff);
        dev_info(dev, "\tstats runt_transmit_buff: %d\n",
                 chan->stats.runt_transmit_buff);
        dev_info(dev, "\tstats empty_dequeue: %d\n",
                 chan->stats.empty_dequeue);
        dev_info(dev, "\tstats busy_dequeue: %d\n",
                 chan->stats.busy_dequeue);
        dev_info(dev, "\tstats good_dequeue: %d\n",
                 chan->stats.good_dequeue);
        dev_info(dev, "\tstats requeue: %d\n",
                 chan->stats.requeue);
        dev_info(dev, "\tstats teardown_dequeue: %d\n",
                 chan->stats.teardown_dequeue);

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

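/*
 * Queue an initialized descriptor on a channel.  If the queue was empty
 * the descriptor becomes the new head and is written to the head
 * descriptor pointer directly.  Otherwise it is chained after the current
 * tail; if the hardware had already reached end-of-queue on the old tail
 * (EOQ set, OWNER clear), the chain is restarted from the new descriptor
 * and the event is counted as misqueued.
 */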
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *prev = chan->tail;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        u32                             mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}

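/*
 * Submit a buffer for transmission or reception.  The buffer is mapped
 * for DMA, a single descriptor is allocated from the pool and filled in
 * (SOP | EOP | OWNER plus the length), and the caller's token is stashed
 * in the software fields so it can be handed back to the completion
 * handler.  Requests shorter than min_packet_size are rounded up to that
 * length and counted as runt_transmit_buff.
 */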
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, gfp_t gfp_mask)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        unsigned long                   flags;
        u32                             mode;
        int                             ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool, 1);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len,    len);
        desc_write(desc, hw_mode,   mode | len);
        desc_write(desc, sw_token,  token);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len,    len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      buff_dma;
        int                             origlen;
        void                            *token;

        token      = (void *)desc_read(desc, sw_token);
        buff_dma   = desc_read(desc, sw_buffer);
        origlen    = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)(token, outlen, status);
}

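/*
 * Dequeue and complete one descriptor from the head of the channel.
 * Returns -ENOENT if the queue is empty and -EBUSY if the head is still
 * owned by the hardware.  Otherwise the completion pointer is
 * acknowledged, the chain is restarted after an EOQ condition, and the
 * buffer is released to the registered handler with the lock dropped.
 */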
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        int                             status, outlen;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        unsigned long                   flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status  = __raw_readl(&desc->hw_mode);
        outlen  = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }
        status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if (status & CPDMA_DESC_EOQ) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);

        __cpdma_chan_free(chan, desc, outlen, status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}

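/*
 * Start a channel on an active controller: unmask its completion
 * interrupt, mark it active, and if descriptors were queued while the
 * channel was idle, reload the head descriptor pointer (and the rx free
 * buffer count for rx channels).
 */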
int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

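/*
 * Stop a channel: mask its interrupt, request a hardware teardown and
 * poll the completion pointer (up to 100ms) for the teardown marker,
 * then drain the queue.  Descriptors that completed normally are handed
 * to the handler with their real status; descriptors that never ran are
 * returned with status -ENOSYS.
 */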
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;
        int                     ret;
        unsigned long           timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan->chan_num);

        /* wait for teardown complete */
        timeout = jiffies + HZ/10;      /* 100 msec */
        while (time_before(jiffies, timeout)) {
                u32 cp = chan_read(chan, cp);
                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                cpu_relax();
        }
        WARN_ON(!time_before(jiffies, timeout));
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets; __cpdma_chan_process() retakes chan->lock */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

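/*
 * Field descriptions for the extended control registers (DMACONTROL,
 * DMASTATUS, RXBUFFOFS).  cpdma_control_get()/cpdma_control_set() use
 * this table to bounds-check the control id, enforce read/write
 * permissions and extract or update the relevant bit field.
 */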
struct cpdma_control_info {
        u32             reg;
        u32             shift, mask;
        int             access;
#define ACCESS_RO       BIT(0)
#define ACCESS_WO       BIT(1)
#define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
};

struct cpdma_control_info controls[] = {
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_RO) != ACCESS_RO)
                goto unlock_ret;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;
        u32 val;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_WO) != ACCESS_WO)
                goto unlock_ret;

        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);
        ret = 0;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}