linux/drivers/dma/fsl-edma-common.c
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "fsl-edma-common.h"

#define EDMA_CR                 0x00
#define EDMA_ES                 0x04
#define EDMA_ERQ                0x0C
#define EDMA_EEI                0x14
#define EDMA_SERQ               0x1B
#define EDMA_CERQ               0x1A
#define EDMA_SEEI               0x19
#define EDMA_CEEI               0x18
#define EDMA_CINT               0x1F
#define EDMA_CERR               0x1E
#define EDMA_SSRT               0x1D
#define EDMA_CDNE               0x1C
#define EDMA_INTR               0x24
#define EDMA_ERR                0x2C

#define EDMA64_ERQH             0x08
#define EDMA64_EEIH             0x10
#define EDMA64_SERQ             0x18
#define EDMA64_CERQ             0x19
#define EDMA64_SEEI             0x1a
#define EDMA64_CEEI             0x1b
#define EDMA64_CINT             0x1c
#define EDMA64_CERR             0x1d
#define EDMA64_SSRT             0x1e
#define EDMA64_CDNE             0x1f
#define EDMA64_INTH             0x20
#define EDMA64_INTL             0x24
#define EDMA64_ERRH             0x28
#define EDMA64_ERRL             0x2c

#define EDMA_TCD                0x1000

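/*
 * Enable the hardware service request for a channel. SEEI/SERQ are the
 * byte-wide "set enable error interrupt" / "set enable request" registers
 * of the eDMA engine: writing a channel number sets that channel's bit.
 * On v1 (Vybrid/mpc577x) the accessor follows the engine's endianness;
 * on v2 (ColdFire) the peripheral is natively big endian, so a plain
 * iowrite8() is used.
 */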
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_chan->edma->version == v1) {
                edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
                edma_writeb(fsl_chan->edma, ch, regs->serq);
        } else {
                /*
                 * ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
                iowrite8(ch, regs->serq);
        }
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_chan->edma->version == v1) {
                edma_writeb(fsl_chan->edma, ch, regs->cerq);
                edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
        } else {
                /*
                 * ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(ch, regs->cerq);
                iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

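/*
 * Route a peripheral request (DMAMUX slot) to this eDMA channel, or tear
 * the routing down. Channels are split evenly across the DMAMUX blocks:
 * for example, with n_chans = 32 and DMAMUX_NR = 2, chans_per_mux = 16,
 * so channel 19 is programmed at byte offset 3 of the second mux.
 */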
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
                       unsigned int slot, bool enable)
{
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;

        chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
        ch_off = ch % chans_per_mux;
        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);

        if (enable)
                iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
        else
                iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

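/*
 * Translate a dmaengine bus width into the TCD ATTR source/destination
 * size encoding. Source and destination are programmed to the same size
 * here; an unsupported width falls back to 32-bit accesses.
 */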
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
        switch (addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
        default:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        }
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = to_fsl_edma_desc(vdesc);
        for (i = 0; i < fsl_desc->n_tcds; i++)
                dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

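/*
 * Stop the channel and reclaim every queued descriptor. The hardware
 * request is disabled and the in-flight descriptor dropped under the
 * channel lock; the collected descriptors are freed only after the lock
 * is released, keeping the critical section short.
 */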
int fsl_edma_terminate_all(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_chan->edesc = NULL;
        fsl_chan->idle = true;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_disable_request(fsl_chan);
                fsl_chan->status = DMA_PAUSED;
                fsl_chan->idle = true;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_enable_request(fsl_chan);
                fsl_chan->status = DMA_IN_PROGRESS;
                fsl_chan->idle = false;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

int fsl_edma_slave_config(struct dma_chan *chan,
                          struct dma_slave_config *cfg)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));

        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

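/*
 * Compute how many bytes of the descriptor are still outstanding. The
 * total is the sum of nbytes * biter over all TCDs. When the descriptor
 * is in flight, the engine's current source (MEM_TO_DEV) or destination
 * (DEV_TO_MEM) address tells which TCD it is executing: completed TCDs
 * are subtracted in full, and for the TCD containing the current
 * address only the bytes already moved below it are subtracted.
 */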
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
                struct virt_dma_desc *vdesc, bool in_progress)
{
        struct fsl_edma_desc *edesc = fsl_chan->edesc;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;
        enum dma_transfer_direction dir = edesc->dirn;
        dma_addr_t cur_addr, dma_addr;
        size_t len, size;
        int i;

        /* calculate the total size in this desc */
        for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
                len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
                        * le16_to_cpu(edesc->tcd[i].vtcd->biter);

        if (!in_progress)
                return len;

        if (dir == DMA_MEM_TO_DEV)
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
        else
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

        /* figure out which TCDs are finished and calculate the residue */
        for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
                        * le16_to_cpu(edesc->tcd[i].vtcd->biter);
                if (dir == DMA_MEM_TO_DEV)
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
                else
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

                len -= size;
                if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
                        len += dma_addr + size - cur_addr;
                        break;
                }
        }

        return len;
}

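/*
 * Report the dmaengine status of a cookie. For transactions that are
 * not yet complete, the residue is computed only when the caller
 * supplied a dma_tx_state: the in-flight descriptor is measured against
 * the live hardware address, a descriptor that has not started yet has
 * no hardware progress subtracted, and an unknown cookie reports zero.
 */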
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE)
                return status;

        if (!txstate)
                return fsl_chan->status;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
        if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, true);
        else if (vdesc)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

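/*
 * Program one hardware TCD from its in-memory image. CSR is cleared
 * first so the channel cannot trigger on a half-written TCD, and the
 * real CSR value (E_SG, interrupt bits, ...) is written last so the
 * channel is armed only once every other field is in place.
 */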
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
                                  struct fsl_edma_hw_tcd *tcd)
{
        struct fsl_edma_engine *edma = fsl_chan->edma;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format. However, we need to load the TCD registers in
         * big- or little-endian, following the endianness of the eDMA
         * engine model.
         */
        edma_writew(edma, 0, &regs->tcd[ch].csr);
        edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
        edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);

        edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
        edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);

        edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
        edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);

        edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
        edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
        edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);

        edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
                        &regs->tcd[ch].dlast_sga);

        edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}

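/*
 * Build one in-memory TCD. BITER ("beginning iteration count") and
 * CITER ("current iteration count") must be programmed to the same
 * value when a TCD is submitted; the engine decrements CITER as major
 * loop iterations complete. The csr flags select a per-TCD interrupt,
 * automatic request disable, and scatter/gather chaining.
 */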
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
                       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
                       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
                       bool disable_req, bool enable_sg)
{
        u16 csr = 0;

        /*
         * eDMA hardware SGs require the TCDs to be stored in little
         * endian format irrespective of the register endian model.
         * So we store the values in memory in little endian and let
         * fsl_edma_set_tcd_regs do the swap when programming the
         * registers.
         */
        tcd->saddr = cpu_to_le32(src);
        tcd->daddr = cpu_to_le32(dst);

        tcd->attr = cpu_to_le16(attr);

        tcd->soff = cpu_to_le16(soff);

        tcd->nbytes = cpu_to_le32(nbytes);
        tcd->slast = cpu_to_le32(slast);

        tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
        tcd->doff = cpu_to_le16(doff);

        tcd->dlast_sga = cpu_to_le32(dlast_sga);

        tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

        if (major_int)
                csr |= EDMA_TCD_CSR_INT_MAJOR;

        if (disable_req)
                csr |= EDMA_TCD_CSR_D_REQ;

        if (enable_sg)
                csr |= EDMA_TCD_CSR_E_SG;

        tcd->csr = cpu_to_le16(csr);
}

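/*
 * Allocate a software descriptor holding sg_len TCDs. The descriptor
 * itself is plain kernel memory, while each hardware TCD comes from the
 * channel's DMA pool so that it is device-visible and correctly aligned
 * for scatter/gather linking.
 */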
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
                int sg_len)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = kzalloc(sizeof(*fsl_desc) +
                           sizeof(struct fsl_edma_sw_tcd) *
                           sg_len, GFP_NOWAIT);
        if (!fsl_desc)
                return NULL;

        fsl_desc->echan = fsl_chan;
        fsl_desc->n_tcds = sg_len;
        for (i = 0; i < sg_len; i++) {
                fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                        GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
                if (!fsl_desc->tcd[i].vtcd)
                        goto err;
        }
        return fsl_desc;

err:
        while (--i >= 0)
                dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                                fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
        return NULL;
}

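/*
 * Prepare a cyclic transfer: one TCD per period, with each TCD's
 * dlast_sga pointing at the next TCD and the last one wrapping back to
 * the first, so the hardware loops over the buffer indefinitely. Every
 * TCD raises a major-loop interrupt, giving the client one callback per
 * period.
 */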
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        dma_addr_t dma_buf_next;
        int sg_len, i;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;

        if (!is_slave_direction(direction))
                return NULL;

        sg_len = buf_len / period_len;
        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = true;
        fsl_desc->dirn = direction;

        dma_buf_next = dma_addr;
        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        iter = period_len / nbytes;

        for (i = 0; i < sg_len; i++) {
                if (dma_buf_next >= dma_addr + buf_len)
                        dma_buf_next = dma_addr;

                /* get the next TCD's physical address, wrapping to the first */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = dma_buf_next;
                        dst_addr = fsl_chan->cfg.dst_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->cfg.src_addr;
                        dst_addr = dma_buf_next;
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                }

                fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
                                  fsl_chan->attr, soff, nbytes, 0, iter,
                                  iter, doff, last_sg, true, false, true);
                dma_buf_next += period_len;
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

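/*
 * Prepare a scatter/gather transfer: the TCDs are chained through
 * dlast_sga with E_SG set, so the engine loads the next TCD by itself.
 * Only the final TCD raises the major-loop interrupt, disables the
 * hardware request (D_REQ) and leaves E_SG clear, which stops the
 * channel at the end of the list.
 */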
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        struct scatterlist *sg;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;
        int i;

        if (!is_slave_direction(direction))
                return NULL;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;
        fsl_desc->dirn = direction;

        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        for_each_sg(sgl, sg, sg_len, i) {
                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = sg_dma_address(sg);
                        dst_addr = fsl_chan->cfg.dst_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->cfg.src_addr;
                        dst_addr = sg_dma_address(sg);
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                }

                iter = sg_dma_len(sg) / nbytes;
                if (i < sg_len - 1) {
                        /* chain to the next TCD's physical address */
                        last_sg = fsl_desc->tcd[i + 1].ptcd;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          false, false, true);
                } else {
                        last_sg = 0;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          true, true, false);
                }
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
        struct virt_dma_desc *vdesc;

        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        fsl_chan->edesc = to_fsl_edma_desc(vdesc);
        fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
        fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

        if (unlikely(fsl_chan->pm_state != RUNNING)) {
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                /* cannot submit due to suspend */
                return;
        }

        if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

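/*
 * Create the per-channel pool the hardware TCDs are carved from. The
 * 32-byte alignment matches the TCD size, which is the alignment the
 * engine expects when it fetches linked TCDs during scatter/gather.
 */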
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                sizeof(struct fsl_edma_hw_tcd),
                                32, 0);
        if (!fsl_chan->tcd_pool)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_edma_chan_mux(fsl_chan, 0, false);
        fsl_chan->edesc = NULL;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_edma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from those of the 64-channel ColdFire
 * mcf5441x eDMA (here called "v2").
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
        edma->regs.cr = edma->membase + EDMA_CR;
        edma->regs.es = edma->membase + EDMA_ES;
        edma->regs.erql = edma->membase + EDMA_ERQ;
        edma->regs.eeil = edma->membase + EDMA_EEI;

        edma->regs.serq = edma->membase + ((edma->version == v1) ?
                        EDMA_SERQ : EDMA64_SERQ);
        edma->regs.cerq = edma->membase + ((edma->version == v1) ?
                        EDMA_CERQ : EDMA64_CERQ);
        edma->regs.seei = edma->membase + ((edma->version == v1) ?
                        EDMA_SEEI : EDMA64_SEEI);
        edma->regs.ceei = edma->membase + ((edma->version == v1) ?
                        EDMA_CEEI : EDMA64_CEEI);
        edma->regs.cint = edma->membase + ((edma->version == v1) ?
                        EDMA_CINT : EDMA64_CINT);
        edma->regs.cerr = edma->membase + ((edma->version == v1) ?
                        EDMA_CERR : EDMA64_CERR);
        edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
                        EDMA_SSRT : EDMA64_SSRT);
        edma->regs.cdne = edma->membase + ((edma->version == v1) ?
                        EDMA_CDNE : EDMA64_CDNE);
        edma->regs.intl = edma->membase + ((edma->version == v1) ?
                        EDMA_INTR : EDMA64_INTL);
        edma->regs.errl = edma->membase + ((edma->version == v1) ?
                        EDMA_ERR : EDMA64_ERRL);

        if (edma->version == v2) {
                edma->regs.erqh = edma->membase + EDMA64_ERQH;
                edma->regs.eeih = edma->membase + EDMA64_EEIH;
                edma->regs.errh = edma->membase + EDMA64_ERRH;
                edma->regs.inth = edma->membase + EDMA64_INTH;
        }

        edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");