// linux/drivers/dma/mcf-edma.c
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
   4// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>
   5
   6#include <linux/module.h>
   7#include <linux/interrupt.h>
   8#include <linux/dmaengine.h>
   9#include <linux/platform_device.h>
  10#include <linux/platform_data/dma-mcf-edma.h>
  11
  12#include "fsl-edma-common.h"
  13
  14#define EDMA_CHANNELS           64
  15#define EDMA_MASK_CH(x)         ((x) & GENMASK(5, 0))
  16
  17static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
  18{
  19        struct fsl_edma_engine *mcf_edma = dev_id;
  20        struct edma_regs *regs = &mcf_edma->regs;
  21        unsigned int ch;
  22        struct fsl_edma_chan *mcf_chan;
  23        u64 intmap;
  24
  25        intmap = ioread32(regs->inth);
  26        intmap <<= 32;
  27        intmap |= ioread32(regs->intl);
  28        if (!intmap)
  29                return IRQ_NONE;
  30
  31        for (ch = 0; ch < mcf_edma->n_chans; ch++) {
  32                if (intmap & BIT(ch)) {
  33                        iowrite8(EDMA_MASK_CH(ch), regs->cint);
  34
  35                        mcf_chan = &mcf_edma->chans[ch];
  36
  37                        spin_lock(&mcf_chan->vchan.lock);
  38                        if (!mcf_chan->edesc->iscyclic) {
  39                                list_del(&mcf_chan->edesc->vdesc.node);
  40                                vchan_cookie_complete(&mcf_chan->edesc->vdesc);
  41                                mcf_chan->edesc = NULL;
  42                                mcf_chan->status = DMA_COMPLETE;
  43                                mcf_chan->idle = true;
  44                        } else {
  45                                vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
  46                        }
  47
  48                        if (!mcf_chan->edesc)
  49                                fsl_edma_xfer_desc(mcf_chan);
  50
  51                        spin_unlock(&mcf_chan->vchan.lock);
  52                }
  53        }
  54
  55        return IRQ_HANDLED;
  56}
  57
  58static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
  59{
  60        struct fsl_edma_engine *mcf_edma = dev_id;
  61        struct edma_regs *regs = &mcf_edma->regs;
  62        unsigned int err, ch;
  63
  64        err = ioread32(regs->errl);
  65        if (!err)
  66                return IRQ_NONE;
  67
  68        for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
  69                if (err & BIT(ch)) {
  70                        fsl_edma_disable_request(&mcf_edma->chans[ch]);
  71                        iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
  72                        mcf_edma->chans[ch].status = DMA_ERROR;
  73                        mcf_edma->chans[ch].idle = true;
  74                }
  75        }
  76
  77        err = ioread32(regs->errh);
  78        if (!err)
  79                return IRQ_NONE;
  80
  81        for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
  82                if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
  83                        fsl_edma_disable_request(&mcf_edma->chans[ch]);
  84                        iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
  85                        mcf_edma->chans[ch].status = DMA_ERROR;
  86                        mcf_edma->chans[ch].idle = true;
  87                }
  88        }
  89
  90        return IRQ_HANDLED;
  91}
  92
  93static int mcf_edma_irq_init(struct platform_device *pdev,
  94                                struct fsl_edma_engine *mcf_edma)
  95{
  96        int ret = 0, i;
  97        struct resource *res;
  98
  99        res = platform_get_resource_byname(pdev,
 100                                IORESOURCE_IRQ, "edma-tx-00-15");
 101        if (!res)
 102                return -1;
 103
 104        for (ret = 0, i = res->start; i <= res->end; ++i)
 105                ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
 106        if (ret)
 107                return ret;
 108
 109        res = platform_get_resource_byname(pdev,
 110                        IORESOURCE_IRQ, "edma-tx-16-55");
 111        if (!res)
 112                return -1;
 113
 114        for (ret = 0, i = res->start; i <= res->end; ++i)
 115                ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
 116        if (ret)
 117                return ret;
 118
 119        ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
 120        if (ret != -ENXIO) {
 121                ret = request_irq(ret, mcf_edma_tx_handler,
 122                                  0, "eDMA", mcf_edma);
 123                if (ret)
 124                        return ret;
 125        }
 126
 127        ret = platform_get_irq_byname(pdev, "edma-err");
 128        if (ret != -ENXIO) {
 129                ret = request_irq(ret, mcf_edma_err_handler,
 130                                  0, "eDMA", mcf_edma);
 131                if (ret)
 132                        return ret;
 133        }
 134
 135        return 0;
 136}
 137
 138static void mcf_edma_irq_free(struct platform_device *pdev,
 139                                struct fsl_edma_engine *mcf_edma)
 140{
 141        int irq;
 142        struct resource *res;
 143
 144        res = platform_get_resource_byname(pdev,
 145                        IORESOURCE_IRQ, "edma-tx-00-15");
 146        if (res) {
 147                for (irq = res->start; irq <= res->end; irq++)
 148                        free_irq(irq, mcf_edma);
 149        }
 150
 151        res = platform_get_resource_byname(pdev,
 152                        IORESOURCE_IRQ, "edma-tx-16-55");
 153        if (res) {
 154                for (irq = res->start; irq <= res->end; irq++)
 155                        free_irq(irq, mcf_edma);
 156        }
 157
 158        irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
 159        if (irq != -ENXIO)
 160                free_irq(irq, mcf_edma);
 161
 162        irq = platform_get_irq_byname(pdev, "edma-err");
 163        if (irq != -ENXIO)
 164                free_irq(irq, mcf_edma);
 165}
 166
 167static int mcf_edma_probe(struct platform_device *pdev)
 168{
 169        struct mcf_edma_platform_data *pdata;
 170        struct fsl_edma_engine *mcf_edma;
 171        struct fsl_edma_chan *mcf_chan;
 172        struct edma_regs *regs;
 173        struct resource *res;
 174        int ret, i, len, chans;
 175
 176        pdata = dev_get_platdata(&pdev->dev);
 177        if (!pdata) {
 178                dev_err(&pdev->dev, "no platform data supplied\n");
 179                return -EINVAL;
 180        }
 181
 182        chans = pdata->dma_channels;
 183        len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
 184        mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
 185        if (!mcf_edma)
 186                return -ENOMEM;
 187
 188        mcf_edma->n_chans = chans;
 189
 190        /* Set up version for ColdFire edma */
 191        mcf_edma->version = v2;
 192        mcf_edma->big_endian = 1;
 193
 194        if (!mcf_edma->n_chans) {
 195                dev_info(&pdev->dev, "setting default channel number to 64");
 196                mcf_edma->n_chans = 64;
 197        }
 198
 199        mutex_init(&mcf_edma->fsl_edma_mutex);
 200
 201        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 202
 203        mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
 204        if (IS_ERR(mcf_edma->membase))
 205                return PTR_ERR(mcf_edma->membase);
 206
 207        fsl_edma_setup_regs(mcf_edma);
 208        regs = &mcf_edma->regs;
 209
 210        INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
 211        for (i = 0; i < mcf_edma->n_chans; i++) {
 212                struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
 213
 214                mcf_chan->edma = mcf_edma;
 215                mcf_chan->slave_id = i;
 216                mcf_chan->idle = true;
 217                mcf_chan->vchan.desc_free = fsl_edma_free_desc;
 218                vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
 219                iowrite32(0x0, &regs->tcd[i].csr);
 220        }
 221
 222        iowrite32(~0, regs->inth);
 223        iowrite32(~0, regs->intl);
 224
 225        ret = mcf_edma_irq_init(pdev, mcf_edma);
 226        if (ret)
 227                return ret;
 228
 229        dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
 230        dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
 231        dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
 232
 233        mcf_edma->dma_dev.dev = &pdev->dev;
 234        mcf_edma->dma_dev.device_alloc_chan_resources =
 235                        fsl_edma_alloc_chan_resources;
 236        mcf_edma->dma_dev.device_free_chan_resources =
 237                        fsl_edma_free_chan_resources;
 238        mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
 239        mcf_edma->dma_dev.device_prep_dma_cyclic =
 240                        fsl_edma_prep_dma_cyclic;
 241        mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
 242        mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
 243        mcf_edma->dma_dev.device_pause = fsl_edma_pause;
 244        mcf_edma->dma_dev.device_resume = fsl_edma_resume;
 245        mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
 246        mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
 247
 248        mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
 249        mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
 250        mcf_edma->dma_dev.directions =
 251                        BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 252
 253        mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
 254        mcf_edma->dma_dev.filter.map = pdata->slave_map;
 255        mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
 256
 257        platform_set_drvdata(pdev, mcf_edma);
 258
 259        ret = dma_async_device_register(&mcf_edma->dma_dev);
 260        if (ret) {
 261                dev_err(&pdev->dev,
 262                        "Can't register Freescale eDMA engine. (%d)\n", ret);
 263                return ret;
 264        }
 265
 266        /* Enable round robin arbitration */
 267        iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
 268
 269        return 0;
 270}
 271
 272static int mcf_edma_remove(struct platform_device *pdev)
 273{
 274        struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
 275
 276        mcf_edma_irq_free(pdev, mcf_edma);
 277        fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
 278        dma_async_device_unregister(&mcf_edma->dma_dev);
 279
 280        return 0;
 281}
 282
/* Platform driver glue; matched by name against the "mcf-edma" device. */
static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};
 290
 291bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
 292{
 293        if (chan->device->dev->driver == &mcf_edma_driver.driver) {
 294                struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
 295
 296                return (mcf_chan->slave_id == (uintptr_t)param);
 297        }
 298
 299        return false;
 300}
 301EXPORT_SYMBOL(mcf_edma_filter_fn);
 302
/* Module entry: register the platform driver early (subsys initcall). */
static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);
 308
/* Module exit: unregister the platform driver. */
static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);
 314
 315MODULE_ALIAS("platform:mcf-edma");
 316MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
 317MODULE_LICENSE("GPL v2");
 318