linux/drivers/dma/sf-pdma/sf-pdma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "sf-pdma.h"

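/*
 * Fallback 64-bit MMIO accessors for configurations that do not provide
 * readq()/writeq() (e.g. 32-bit builds): each access is split into two
 * 32-bit operations, low word first.
 */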
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
        writel(lower_32_bits(v), addr);
        writel(upper_32_bits(v), addr + 4);
}
#endif

static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
        return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}

static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct sf_pdma_desc, vdesc);
}

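/*
 * Reuse the channel's current descriptor when it is no longer in use;
 * otherwise allocate a fresh one with GFP_NOWAIT, since this may be
 * called from atomic context.
 */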
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
        struct sf_pdma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->desc && !chan->desc->in_use) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return chan->desc;
        }

        spin_unlock_irqrestore(&chan->lock, flags);

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->chan = chan;

        return desc;
}

static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
                              u64 dst, u64 src, u64 size)
{
        desc->xfer_type = PDMA_FULL_SPEED;
        desc->xfer_size = size;
        desc->dst_addr = dst;
        desc->src_addr = src;
}

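/*
 * Release the channel by clearing its control register, undoing the
 * claim taken in sf_pdma_alloc_chan_resources().
 */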
static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        writel(PDMA_CLEAR_CTRL, regs->ctrl);
}

static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
                        size_t len, unsigned long flags)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        struct sf_pdma_desc *desc;
        unsigned long iflags;

        if (chan && (!len || !dest || !src)) {
                dev_err(chan->pdma->dma_dev.dev,
                        "Please check dma len, dest, src!\n");
                return NULL;
        }

        desc = sf_pdma_alloc_desc(chan);
        if (!desc)
                return NULL;

        desc->in_use = true;
        desc->dirn = DMA_MEM_TO_MEM;
        desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

        spin_lock_irqsave(&chan->vchan.lock, iflags);
        chan->desc = desc;
        sf_pdma_fill_desc(desc, dest, src, len);
        spin_unlock_irqrestore(&chan->vchan.lock, iflags);

        return desc->async_tx;
}

static int sf_pdma_slave_config(struct dma_chan *dchan,
                                struct dma_slave_config *cfg)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

        memcpy(&chan->cfg, cfg, sizeof(*cfg));

        return 0;
}

static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        struct pdma_regs *regs = &chan->regs;

        dma_cookie_init(dchan);
        writel(PDMA_CLAIM_MASK, regs->ctrl);

        return 0;
}

static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}

static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        sf_pdma_disable_request(chan);
        kfree(chan->desc);
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        sf_pdma_disclaim_chan(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&chan->vchan, &head);
}

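/*
 * Report the residue for a cookie: the last completed cookie reports zero,
 * the in-flight descriptor reads the hardware remaining-byte counter, and
 * a descriptor still queued on the channel reports its full transfer size.
 */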
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
                                   dma_cookie_t cookie)
{
        struct virt_dma_desc *vd = NULL;
        struct pdma_regs *regs = &chan->regs;
        unsigned long flags;
        u64 residue = 0;
        struct sf_pdma_desc *desc;
        struct dma_async_tx_descriptor *tx;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        tx = &chan->desc->vdesc.tx;
        if (cookie == tx->chan->completed_cookie)
                goto out;

        if (cookie == tx->cookie) {
                residue = readq(regs->residue);
        } else {
                vd = vchan_find_desc(&chan->vchan, cookie);
                if (!vd)
                        goto out;

                desc = to_sf_pdma_desc(vd);
                residue = desc->xfer_size;
        }

out:
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        return residue;
}

static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
                  dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        enum dma_status status;

        status = dma_cookie_status(dchan, cookie, txstate);

        if (txstate && status != DMA_ERROR)
                dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

        return status;
}

static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        sf_pdma_disable_request(chan);
        kfree(chan->desc);
        chan->desc = NULL;
        chan->xfer_err = false;
        vchan_get_all_descriptors(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&chan->vchan, &head);

        return 0;
}

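/*
 * Claim the channel, unmask the done and error interrupts, and set the
 * run bit to start the transfer programmed into the channel registers.
 */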
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;
        u32 v;

        v = PDMA_CLAIM_MASK |
                PDMA_ENABLE_DONE_INT_MASK |
                PDMA_ENABLE_ERR_INT_MASK |
                PDMA_RUN_MASK;

        writel(v, regs->ctrl);
}

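/*
 * Program the transfer type, size and addresses of the current
 * descriptor into the channel registers and kick off the transfer.
 */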
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
        struct sf_pdma_desc *desc = chan->desc;
        struct pdma_regs *regs = &chan->regs;

        if (!desc) {
                dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
                return;
        }

        writel(desc->xfer_type, regs->xfer_type);
        writeq(desc->xfer_size, regs->xfer_size);
        writeq(desc->dst_addr, regs->dst_addr);
        writeq(desc->src_addr, regs->src_addr);

        chan->desc = desc;
        chan->status = DMA_IN_PROGRESS;
        sf_pdma_enable_request(chan);
}

static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        if (vchan_issue_pending(&chan->vchan) && chan->desc)
                sf_pdma_xfer_desc(chan);

        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct sf_pdma_desc *desc;

        desc = to_sf_pdma_desc(vdesc);
        desc->in_use = false;
}

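/*
 * Done bottom half: restore the retry budget after a recovered error,
 * then complete the cookie of the finished descriptor.
 */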
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
        struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->xfer_err) {
                chan->retries = MAX_RETRY;
                chan->status = DMA_COMPLETE;
                chan->xfer_err = false;
        }
        spin_unlock_irqrestore(&chan->lock, flags);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        list_del(&chan->desc->vdesc.node);
        vchan_cookie_complete(&chan->desc->vdesc);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

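/*
 * Error bottom half: retry the transfer while the retry budget lasts;
 * once it is exhausted, invoke the descriptor callback so the client
 * is notified of the failure.
 */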
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
        struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
        struct sf_pdma_desc *desc = chan->desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->retries <= 0) {
                /* failed to recover */
                spin_unlock_irqrestore(&chan->lock, flags);
                dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
        } else {
                /* retry */
                chan->retries--;
                chan->xfer_err = true;
                chan->status = DMA_ERROR;

                sf_pdma_enable_request(chan);
                spin_unlock_irqrestore(&chan->lock, flags);
        }
}

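/*
 * Done interrupt: acknowledge the done status bit. If the engine stopped
 * with bytes still outstanding, advance the descriptor past the bytes
 * already copied and resubmit it; otherwise hand completion off to the
 * done tasklet.
 */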
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
        struct sf_pdma_chan *chan = dev_id;
        struct pdma_regs *regs = &chan->regs;
        u64 residue;

        spin_lock(&chan->vchan.lock);
        writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
        residue = readq(regs->residue);

        if (!residue) {
                tasklet_hi_schedule(&chan->done_tasklet);
        } else {
                /* submit the next transaction if possible */
                struct sf_pdma_desc *desc = chan->desc;

                desc->src_addr += desc->xfer_size - residue;
                desc->dst_addr += desc->xfer_size - residue;
                desc->xfer_size = residue;

                sf_pdma_xfer_desc(chan);
        }

        spin_unlock(&chan->vchan.lock);

        return IRQ_HANDLED;
}

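/*
 * Error interrupt: acknowledge the error status bit and defer retry and
 * recovery handling to the error tasklet.
 */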
static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
        struct sf_pdma_chan *chan = dev_id;
        struct pdma_regs *regs = &chan->regs;

        spin_lock(&chan->lock);
        writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
        spin_unlock(&chan->lock);

        tasklet_schedule(&chan->err_tasklet);

        return IRQ_HANDLED;
}

/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer of platform_device
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize the DONE and ERROR interrupt handlers for the 4 channels. The
 * caller should make sure the pointers passed in are non-NULL. This function
 * should be called only once, during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0          - all IRQ handlers initialized successfully
 * * -EINVAL    - failed to request an IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
        int irq, r, i;
        struct sf_pdma_chan *chan;

        for (i = 0; i < pdma->n_chans; i++) {
                chan = &pdma->chans[i];

                irq = platform_get_irq(pdev, i * 2);
                if (irq < 0) {
                        dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
                        return -EINVAL;
                }

                r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
                                     dev_name(&pdev->dev), (void *)chan);
                if (r) {
                        dev_err(&pdev->dev, "Failed to attach done ISR: %d\n", r);
                        return -EINVAL;
                }

                chan->txirq = irq;

                irq = platform_get_irq(pdev, (i * 2) + 1);
                if (irq < 0) {
                        dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
                        return -EINVAL;
                }

                r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
                                     dev_name(&pdev->dev), (void *)chan);
                if (r) {
                        dev_err(&pdev->dev, "Failed to attach err ISR: %d\n", r);
                        return -EINVAL;
                }

                chan->errirq = irq;
        }

        return 0;
}

/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize all data structures and register bases. The caller should make
 * sure the pointer passed in is non-NULL. This function should be called only
 * once, during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
        int i;
        struct sf_pdma_chan *chan;

        INIT_LIST_HEAD(&pdma->dma_dev.channels);

        for (i = 0; i < pdma->n_chans; i++) {
                chan = &pdma->chans[i];

                chan->regs.ctrl =
                        SF_PDMA_REG_BASE(i) + PDMA_CTRL;
                chan->regs.xfer_type =
                        SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
                chan->regs.xfer_size =
                        SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
                chan->regs.dst_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
                chan->regs.src_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
                chan->regs.act_type =
                        SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
                chan->regs.residue =
                        SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
                chan->regs.cur_dst_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
                chan->regs.cur_src_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

                chan->pdma = pdma;
                chan->pm_state = RUNNING;
                chan->slave_id = i;
                chan->xfer_err = false;
                spin_lock_init(&chan->lock);

                chan->vchan.desc_free = sf_pdma_free_desc;
                vchan_init(&chan->vchan, &pdma->dma_dev);

                writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

                tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
                tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
        }
}

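/*
 * Probe: map the PDMA register space, request per-channel done/error IRQs,
 * initialize the channels, and register a dmaengine device that advertises
 * MEM_TO_MEM copy capability.
 */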
static int sf_pdma_probe(struct platform_device *pdev)
{
        struct sf_pdma *pdma;
        struct sf_pdma_chan *chan;
        struct resource *res;
        int len, chans;
        int ret;
        const enum dma_slave_buswidth widths =
                DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
                DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
                DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
                DMA_SLAVE_BUSWIDTH_64_BYTES;

        chans = PDMA_NR_CH;
        len = sizeof(*pdma) + sizeof(*chan) * chans;
        pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!pdma)
                return -ENOMEM;

        pdma->n_chans = chans;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pdma->membase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pdma->membase))
                return PTR_ERR(pdma->membase);

        ret = sf_pdma_irq_init(pdev, pdma);
        if (ret)
                return ret;

        sf_pdma_setup_chans(pdma);

        pdma->dma_dev.dev = &pdev->dev;

        /* Setup capability */
        dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
        pdma->dma_dev.copy_align = 2;
        pdma->dma_dev.src_addr_widths = widths;
        pdma->dma_dev.dst_addr_widths = widths;
        pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
        pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        pdma->dma_dev.descriptor_reuse = true;

        /* Setup DMA APIs */
        pdma->dma_dev.device_alloc_chan_resources =
                sf_pdma_alloc_chan_resources;
        pdma->dma_dev.device_free_chan_resources =
                sf_pdma_free_chan_resources;
        pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
        pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
        pdma->dma_dev.device_config = sf_pdma_slave_config;
        pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
        pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;

        platform_set_drvdata(pdev, pdma);

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                dev_warn(&pdev->dev,
                         "Failed to set DMA mask. Fall back to default.\n");

        ret = dma_async_device_register(&pdma->dma_dev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register SiFive Platform DMA. (%d)\n", ret);
                return ret;
        }

        return 0;
}

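/*
 * Illustrative only, not part of this driver: a client consumes the engine
 * registered above through the generic dmaengine API, roughly as follows
 * (assuming hypothetical DMA-mapped dst_dma/src_dma buffers of length len):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */
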
static int sf_pdma_remove(struct platform_device *pdev)
{
        struct sf_pdma *pdma = platform_get_drvdata(pdev);
        struct sf_pdma_chan *ch;
        int i;

        for (i = 0; i < PDMA_NR_CH; i++) {
                ch = &pdma->chans[i];

                devm_free_irq(&pdev->dev, ch->txirq, ch);
                devm_free_irq(&pdev->dev, ch->errirq, ch);
                list_del(&ch->vchan.chan.device_node);
                tasklet_kill(&ch->vchan.task);
                tasklet_kill(&ch->done_tasklet);
                tasklet_kill(&ch->err_tasklet);
        }

        dma_async_device_unregister(&pdma->dma_dev);

        return 0;
}

static const struct of_device_id sf_pdma_dt_ids[] = {
        { .compatible = "sifive,fu540-c000-pdma" },
        {},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);

static struct platform_driver sf_pdma_driver = {
        .probe          = sf_pdma_probe,
        .remove         = sf_pdma_remove,
        .driver         = {
                .name   = "sf-pdma",
                .of_match_table = sf_pdma_dt_ids,
        },
};

static int __init sf_pdma_init(void)
{
        return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
        platform_driver_unregister(&sf_pdma_driver);
}

/* do early init */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");