linux/drivers/dma/altera-msgdma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN            U32_MAX
#define MSGDMA_DESC_NUM                 1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *                 bit 23:16 read burst
 *                 bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *          bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
        u32 read_addr_lo;
        u32 write_addr_lo;
        u32 len;
        u32 burst_seq_num;
        u32 stride;
        u32 read_addr_hi;
        u32 write_addr_hi;
        u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)       ((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP         BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP         BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS      BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES     BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP      BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN      BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ     BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ       BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ      GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE      BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO              BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST        (MSGDMA_DESC_CTL_GEN_SOP |      \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE       (MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST         (MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE       (MSGDMA_DESC_CTL_GEN_SOP |      \
                                         MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE       (MSGDMA_DESC_CTL_END_ON_EOP |   \
                                         MSGDMA_DESC_CTL_END_ON_LEN |   \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_EARLY_IRQ |    \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD           0x00000001
#define MSGDMA_DESC_STRIDE_WR           0x00010000
#define MSGDMA_DESC_STRIDE_RW           0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS               0x00    /* Read / Clear */
#define MSGDMA_CSR_CONTROL              0x04    /* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL        0x08    /* 31:16 - write fill level */
                                                /* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL      0x0c    /* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM           0x10    /* 31:16 - write seq number */
                                                /* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY                    BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY          BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL           BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY          BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL           BIT(4)
#define MSGDMA_CSR_STAT_STOPPED                 BIT(5)
#define MSGDMA_CSR_STAT_RESETTING               BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR          BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY        BIT(8)
#define MSGDMA_CSR_STAT_IRQ                     BIT(9)
#define MSGDMA_CSR_STAT_MASK                    GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ        GENMASK(8, 0)

#define DESC_EMPTY      (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
                         MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP                     BIT(0)
#define MSGDMA_CSR_CTL_RESET                    BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR              BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY            BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR              BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS               BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)         (((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)         ((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)       ((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)               (((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED   0x00
#define MSGDMA_RESP_STATUS              0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM  BIT(8)
#define MSGDMA_RESP_ERR_MASK    0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: list node used to queue the descriptor on the channel lists
 * @tx_list: list of child descriptors belonging to this transaction
 */
struct msgdma_sw_desc {
        struct dma_async_tx_descriptor async_tx;
        struct msgdma_extended_desc hw_desc;
        struct list_head node;
        struct list_head tx_list;
};

/**
 * struct msgdma_device - DMA device structure
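 * @lock: protects the channel lists and descriptor bookkeeping
 * @dev: device associated with this driver instance
 * @irq_tasklet: tasklet doing the descriptor completion work
 * @pending_list: descriptors submitted but not yet issued to the hardware
 * @free_list: unused sw descriptors
 * @active_list: descriptors currently queued in the hardware FIFO
 * @done_list: completed descriptors waiting for cleanup
 * @desc_free_cnt: number of descriptors available on @free_list
 * @idle: channel idle state (no descriptor is being processed)
 * @dmadev: DMA engine device structure
 * @dmachan: the single DMA channel provided by this controller
 * @hw_desq: hardware descriptor queue DMA address (not used in this driver)
 * @sw_desq: backing array for the sw descriptor pool
 * @npendings: number of pending transfers (not used in this driver)
 * @slave_cfg: slave configuration set via device_config()
 * @irq: IRQ number used by the controller
 * @csr: dispatcher control and status register space
 * @desc: descriptor FIFO register space
 * @resp: response register space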
 */
struct msgdma_device {
        spinlock_t lock;
        struct device *dev;
        struct tasklet_struct irq_tasklet;
        struct list_head pending_list;
        struct list_head free_list;
        struct list_head active_list;
        struct list_head done_list;
        u32 desc_free_cnt;
        bool idle;

        struct dma_device dmadev;
        struct dma_chan dmachan;
        dma_addr_t hw_desq;
        struct msgdma_sw_desc *sw_desq;
        unsigned int npendings;

        struct dma_slave_config slave_cfg;

        int irq;

        /* mSGDMA controller */
        void __iomem *csr;

        /* mSGDMA descriptors */
        void __iomem *desc;

        /* mSGDMA response */
        void __iomem *resp;
};

#define to_mdev(chan)   container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)  container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
        spin_unlock_irqrestore(&mdev->lock, flags);

        INIT_LIST_HEAD(&desc->tx_list);

        return desc;
}

/**
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
                                   struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *child, *next;

        mdev->desc_free_cnt++;
        list_add_tail(&desc->node, &mdev->free_list);
        list_for_each_entry_safe(child, next, &desc->tx_list, node) {
                mdev->desc_free_cnt++;
                list_move_tail(&child->node, &mdev->free_list);
        }
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to free
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
                                  struct list_head *list)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, list, node)
                msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
                               dma_addr_t dst, dma_addr_t src, size_t len,
                               u32 stride)
{
        /* Set lower 32bits of src & dst addresses in the descriptor */
        desc->read_addr_lo = lower_32_bits(src);
        desc->write_addr_lo = lower_32_bits(dst);

        /* Set upper 32bits of src & dst addresses in the descriptor */
        desc->read_addr_hi = upper_32_bits(src);
        desc->write_addr_hi = upper_32_bits(dst);

        desc->len = len;
        desc->stride = stride;
        desc->burst_seq_num = 0;        /* 0 will result in max burst length */

        /*
         * Don't set interrupt on xfer end yet, this will be done later
         * for the "last" descriptor
         */
        desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
                MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
        desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
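 * The descriptor is only added to the channel's pending list here; the
 * hardware is started later from msgdma_issue_pending().
 *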
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new;
        dma_cookie_t cookie;
        unsigned long flags;

        new = tx_to_desc(tx);
        spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);

        list_add_tail(&new->node, &mdev->pending_list);
        spin_unlock_irqrestore(&mdev->lock, flags);

        return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
                   dma_addr_t dma_src, size_t len, ulong flags)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *new, *first = NULL;
        struct msgdma_extended_desc *desc;
        size_t copy;
        u32 desc_cnt;
        unsigned long irqflags;

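        /*
         * A single hw descriptor can transfer at most MSGDMA_MAX_TRANS_LEN
         * bytes, so larger requests are split into several descriptors below.
         */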
        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        do {
                /* Allocate and populate the descriptor */
                new = msgdma_get_descriptor(mdev);

                copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
                desc = &new->hw_desc;
                msgdma_desc_config(desc, dma_dst, dma_src, copy,
                                   MSGDMA_DESC_STRIDE_RW);
                len -= copy;
                dma_src += copy;
                dma_dst += copy;
                if (!first)
                        first = new;
                else
                        list_add_tail(&new->node, &first->tx_list);
        } while (len);

        msgdma_desc_config_eod(desc);
        async_tx_ack(&first->async_tx);
        first->async_tx.flags = flags;

        return &first->async_tx;
}

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Scatterlist of memory buffers (source or destination per @dir)
 * @sg_len: Number of entries in the scatterlist
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                     unsigned int sg_len, enum dma_transfer_direction dir,
                     unsigned long flags, void *context)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct dma_slave_config *cfg = &mdev->slave_cfg;
        struct msgdma_sw_desc *new, *first = NULL;
        void *desc = NULL;
        size_t len, avail;
        dma_addr_t dma_dst, dma_src;
        u32 desc_cnt = 0, i;
        struct scatterlist *sg;
        u32 stride;
        unsigned long irqflags;

        for_each_sg(sgl, sg, sg_len, i)
                desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        avail = sg_dma_len(sgl);

        /* Run until we are out of scatterlist entries */
        while (true) {
                /* Allocate and populate the descriptor */
                new = msgdma_get_descriptor(mdev);

                desc = &new->hw_desc;
                len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

                if (dir == DMA_MEM_TO_DEV) {
                        dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        dma_dst = cfg->dst_addr;
                        stride = MSGDMA_DESC_STRIDE_RD;
                } else {
                        dma_src = cfg->src_addr;
                        dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        stride = MSGDMA_DESC_STRIDE_WR;
                }
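                /*
                 * Only the memory side of the transfer uses an incrementing
                 * address; the device side keeps using the fixed slave
                 * address from the channel config, which the stride values
                 * selected above reflect.
                 */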
                msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
                avail -= len;

                if (!first)
                        first = new;
                else
                        list_add_tail(&new->node, &first->tx_list);

                /* Fetch the next scatterlist entry */
                if (avail == 0) {
                        if (sg_len == 0)
                                break;
                        sgl = sg_next(sgl);
                        if (sgl == NULL)
                                break;
                        sg_len--;
                        avail = sg_dma_len(sgl);
                }
        }

        msgdma_desc_config_eod(desc);
        first->async_tx.flags = flags;

        return &first->async_tx;
}

static int msgdma_dma_config(struct dma_chan *dchan,
                             struct dma_slave_config *config)
{
        struct msgdma_device *mdev = to_mdev(dchan);

        memcpy(&mdev->slave_cfg, config, sizeof(*config));

        return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
        u32 val;
        int ret;

        /* Reset mSGDMA */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
        iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

        ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
                                 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
                                 1, 10000);
        if (ret)
                dev_err(mdev->dev, "DMA channel did not reset\n");

        /* Clear all status bits */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

        /* Enable the DMA controller including interrupts */
        iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
                  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

        mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
                            struct msgdma_sw_desc *desc)
{
        void __iomem *hw_desc = mdev->desc;

        /*
         * Check that the DESC FIFO is not full. If it is full, we need to
         * wait for at least one entry to become free again.
         */
        while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
               MSGDMA_CSR_STAT_DESC_BUF_FULL)
                mdelay(1);

        /*
         * The descriptor needs to get copied into the descriptor FIFO
         * of the DMA controller. The descriptor will get flushed to the
         * FIFO, once the last word (control word) is written. Since we
         * are not 100% sure that memcpy() writes all words in the "correct"
         * order (address from low to high) on all architectures, we make
         * sure this control word is written last by writing it separately
         * and adding some write barriers here.
         */
        memcpy((void __force *)hw_desc, &desc->hw_desc,
               sizeof(desc->hw_desc) - sizeof(u32));

        /* Write control word last to flush this descriptor into the FIFO */
        mdev->idle = false;
        wmb();
        iowrite32(desc->hw_desc.control, hw_desc +
                  offsetof(struct msgdma_extended_desc, control));
        wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
                                     struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *sdesc, *next;

        msgdma_copy_one(mdev, desc);

        list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
                msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
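 *
 * Must be called with mdev->lock held.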
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        if (!mdev->idle)
                return;

        desc = list_first_entry_or_null(&mdev->pending_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;

        list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
        msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
        struct msgdma_device *mdev = to_mdev(chan);
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        msgdma_start_transfer(mdev);
        spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
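 *
 * Called with mdev->lock held; the lock is dropped while the user callback
 * runs.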
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                list_del(&desc->node);

                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
                        spin_unlock(&mdev->lock);
                        callback(callback_param);
                        spin_lock(&mdev->lock);
                }

                /* Return the descriptor and its children to the free list */
                msgdma_free_descriptor(mdev, desc);
        }
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        desc = list_first_entry_or_null(&mdev->active_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;
        list_del(&desc->node);
        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
        msgdma_free_desc_list(mdev, &mdev->active_list);
        msgdma_free_desc_list(mdev, &mdev->pending_list);
        msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        msgdma_free_descriptors(mdev);
        spin_unlock_irqrestore(&mdev->lock, flags);
        kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *desc;
        int i;

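        /*
         * The sw descriptors live in normal kernel memory; they are copied
         * word by word into the controller's descriptor FIFO when a transfer
         * is started, so no DMA-coherent pool is needed here.
         */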
        mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
        if (!mdev->sw_desq)
                return -ENOMEM;

        mdev->idle = true;
        mdev->desc_free_cnt = MSGDMA_DESC_NUM;

        INIT_LIST_HEAD(&mdev->free_list);

        for (i = 0; i < MSGDMA_DESC_NUM; i++) {
                desc = mdev->sw_desq + i;
                dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
                desc->async_tx.tx_submit = msgdma_tx_submit;
                list_add_tail(&desc->node, &mdev->free_list);
        }

        return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Completion tasklet handler
 * @data: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_tasklet(unsigned long data)
{
        struct msgdma_device *mdev = (struct msgdma_device *)data;
        u32 count;
        u32 __maybe_unused size;
        u32 __maybe_unused status;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);

        /* Read number of responses that are available */
        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
        dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
                __func__, __LINE__, count);

        while (count--) {
                /*
                 * Read both longwords to purge this response from the FIFO.
                 * On Avalon-MM implementations, size and status do not
                 * have any real values, like transferred bytes or error
                 * bits. So we need to just drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
                status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }

        spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
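 * Completion handling is deferred to the irq tasklet; the handler itself
 * only restarts the queue when the controller has become idle.
 *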
 * Return: IRQ_HANDLED
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
        struct msgdma_device *mdev = data;
        u32 status;

        status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
        if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
                /* Start next transfer if the DMA controller is idle */
                spin_lock(&mdev->lock);
                mdev->idle = true;
                msgdma_start_transfer(mdev);
                spin_unlock(&mdev->lock);
        }

        tasklet_schedule(&mdev->irq_tasklet);

        /* Clear interrupt in mSGDMA controller */
        iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

        return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
        if (!mdev)
                return;

        devm_free_irq(mdev->dev, mdev->irq, mdev);
        tasklet_kill(&mdev->irq_tasklet);
        list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
{
        struct resource *region;
        struct device *device = &pdev->dev;

        *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (*res == NULL) {
                dev_err(device, "resource %s not defined\n", name);
                return -ENODEV;
        }

        region = devm_request_mem_region(device, (*res)->start,
                                         resource_size(*res), dev_name(device));
        if (region == NULL) {
                dev_err(device, "unable to request %s\n", name);
                return -EBUSY;
        }

        *ptr = devm_ioremap(device, region->start,
                            resource_size(region));
        if (*ptr == NULL) {
                dev_err(device, "ioremap of %s failed!\n", name);
                return -ENOMEM;
        }

        return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
        struct msgdma_device *mdev;
        struct dma_device *dma_dev;
        struct resource *dma_res;
        int ret;

        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
        if (!mdev)
                return -ENOMEM;

        mdev->dev = &pdev->dev;

        /* Map CSR space */
        ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
        if (ret)
                return ret;

        /* Map (extended) descriptor space */
        ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
        if (ret)
                return ret;

        /* Map response space */
        ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, mdev);

        /* Get interrupt nr from platform data */
        mdev->irq = platform_get_irq(pdev, 0);
        if (mdev->irq < 0)
                return -ENXIO;

        ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
                               0, dev_name(&pdev->dev), mdev);
        if (ret)
                return ret;

        tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);

        dma_cookie_init(&mdev->dmachan);

        spin_lock_init(&mdev->lock);

        INIT_LIST_HEAD(&mdev->active_list);
        INIT_LIST_HEAD(&mdev->pending_list);
        INIT_LIST_HEAD(&mdev->done_list);
        INIT_LIST_HEAD(&mdev->free_list);

        dma_dev = &mdev->dmadev;

        /* Set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
                BIT(DMA_MEM_TO_MEM);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        /* Init DMA link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        /* Set base routines */
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = msgdma_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
        dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
        dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
        dma_dev->device_config = msgdma_dma_config;

        dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

        mdev->dmachan.device = dma_dev;
        list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

        /* Set DMA mask to 64 bits */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64 bits\n");
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret)
                        goto fail;
        }

        msgdma_reset(mdev);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto fail;

        dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

        return 0;

fail:
        msgdma_dev_remove(mdev);

        return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
        struct msgdma_device *mdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&mdev->dmadev);
        msgdma_dev_remove(mdev);

        dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

        return 0;
}

static struct platform_driver msgdma_driver = {
        .driver = {
                .name = "altera-msgdma",
        },
        .probe = msgdma_probe,
        .remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");