linux/drivers/rapidio/devices/tsi721_dma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
                 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
                 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
                 "DMA Channel Selection Mask (default: 0x7f = all)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
        return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
        return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct tsi721_tx_desc, txd);
}

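/**
 * tsi721_bdma_ch_init - Initialize a BDMA channel
 * @bdma_chan: BDMA channel to initialize
 * @bd_num: Number of hardware buffer descriptors to allocate
 *
 * Allocates the buffer descriptor ring (plus one extra link descriptor
 * that wraps the ring back to its base) and the descriptor status FIFO,
 * programs their addresses into the channel registers, requests the
 * per-channel MSI-X vectors when MSI-X is in use, and toggles channel
 * initialization.
 *
 * Returns: 0 on success, -ENOMEM or -EIO on failure.
 */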
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
        struct tsi721_dma_desc *bd_ptr;
        struct device *dev = bdma_chan->dchan.device->dev;
        u64             *sts_ptr;
        dma_addr_t      bd_phys;
        dma_addr_t      sts_phys;
        int             sts_size;
#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

        /*
         * Allocate space for DMA descriptors
         * (add an extra element for link descriptor)
         */
        bd_ptr = dma_alloc_coherent(dev,
                                    (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                                    &bd_phys, GFP_ATOMIC);
        if (!bd_ptr)
                return -ENOMEM;

        bdma_chan->bd_num = bd_num;
        bdma_chan->bd_phys = bd_phys;
        bdma_chan->bd_base = bd_ptr;

        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                  "DMAC%d descriptors @ %p (phys = %pad)",
                  bdma_chan->id, bd_ptr, &bd_phys);

        /* Allocate space for descriptor status FIFO */
        sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
                                        (bd_num + 1) : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
        sts_ptr = dma_alloc_coherent(dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_ATOMIC);
        if (!sts_ptr) {
                /* Free space allocated for DMA descriptors */
                dma_free_coherent(dev,
                                  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                                  bd_ptr, bd_phys);
                bdma_chan->bd_base = NULL;
                return -ENOMEM;
        }

        bdma_chan->sts_phys = sts_phys;
        bdma_chan->sts_base = sts_ptr;
        bdma_chan->sts_size = sts_size;

        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
                bdma_chan->id, sts_ptr, &sts_phys, sts_size);

        /* Initialize DMA descriptors ring using added link descriptor */
        bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
        bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
                                                 TSI721_DMAC_DPTRL_MASK);
        bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

        /* Setup DMA descriptor pointers */
        iowrite32(((u64)bd_phys >> 32),
                bdma_chan->regs + TSI721_DMAC_DPTRH);
        iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
                bdma_chan->regs + TSI721_DMAC_DPTRL);

        /* Setup descriptor status FIFO */
        iowrite32(((u64)sts_phys >> 32),
                bdma_chan->regs + TSI721_DMAC_DSBH);
        iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
                bdma_chan->regs + TSI721_DMAC_DSBL);
        iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
                bdma_chan->regs + TSI721_DMAC_DSSZ);

        /* Clear interrupt bits */
        iowrite32(TSI721_DMAC_INT_ALL,
                bdma_chan->regs + TSI721_DMAC_INT);

        ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
        /* Request interrupt service if we are in MSI-X mode */
        if (priv->flags & TSI721_USING_MSIX) {
                int rc, idx;

                idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

                rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
                                 priv->msix[idx].irq_name, (void *)bdma_chan);

                if (rc) {
                        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                                  "Unable to get MSI-X for DMAC%d-DONE",
                                  bdma_chan->id);
                        goto err_out;
                }

                idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

                rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
                                priv->msix[idx].irq_name, (void *)bdma_chan);

                if (rc) {
                        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                                  "Unable to get MSI-X for DMAC%d-INT",
                                  bdma_chan->id);
                        free_irq(
                                priv->msix[TSI721_VECT_DMA0_DONE +
                                            bdma_chan->id].vector,
                                (void *)bdma_chan);
                }

err_out:
                if (rc) {
                        /* Free space allocated for DMA descriptors */
                        dma_free_coherent(dev,
                                (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                                bd_ptr, bd_phys);
                        bdma_chan->bd_base = NULL;

                        /* Free space allocated for status descriptors */
                        dma_free_coherent(dev,
                                sts_size * sizeof(struct tsi721_dma_sts),
                                sts_ptr, sts_phys);
                        bdma_chan->sts_base = NULL;

                        return -EIO;
                }
        }
#endif /* CONFIG_PCI_MSI */

        /* Toggle DMA channel initialization */
        iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
        ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
        bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
        bdma_chan->sts_rdptr = 0;
        udelay(10);

        return 0;
}

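/**
 * tsi721_bdma_ch_free - Release resources owned by a BDMA channel
 * @bdma_chan: BDMA channel to release
 *
 * Refuses to free a channel that is still running. Otherwise puts the
 * channel back into init state, releases its MSI-X vectors (if used),
 * and frees the descriptor ring and status FIFO.
 *
 * Returns: 0 on success, -EFAULT if the channel is still running.
 */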
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
        u32 ch_stat;
#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

        if (!bdma_chan->bd_base)
                return 0;

        /* Check if DMA channel still running */
        ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        if (ch_stat & TSI721_DMAC_STS_RUN)
                return -EFAULT;

        /* Put DMA channel into init state */
        iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                                    bdma_chan->id].vector, (void *)bdma_chan);
                free_irq(priv->msix[TSI721_VECT_DMA0_INT +
                                    bdma_chan->id].vector, (void *)bdma_chan);
        }
#endif /* CONFIG_PCI_MSI */

        /* Free space allocated for DMA descriptors */
        dma_free_coherent(bdma_chan->dchan.device->dev,
                (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
                bdma_chan->bd_base, bdma_chan->bd_phys);
        bdma_chan->bd_base = NULL;

        /* Free space allocated for status FIFO */
        dma_free_coherent(bdma_chan->dchan.device->dev,
                bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
                bdma_chan->sts_base, bdma_chan->sts_phys);
        bdma_chan->sts_base = NULL;
        return 0;
}

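/*
 * Enable or disable interrupt generation for a BDMA channel.
 * Pending channel interrupt bits are cleared in both cases.
 */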
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
        if (enable) {
                /* Clear pending BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);
                ioread32(bdma_chan->regs + TSI721_DMAC_INT);
                /* Enable BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INTE);
        } else {
                /* Disable BDMA channel interrupts */
                iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
                /* Clear pending BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);
        }
}

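/* Returns true if the BDMA channel is not executing a transfer */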
static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
        u32 sts;

        sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

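/**
 * tsi721_bdma_handler - Device-level interrupt handler for a BDMA channel
 * @bdma_chan: BDMA channel that signaled an interrupt
 *
 * Masks further channel interrupts and defers processing to the channel
 * tasklet, which re-enables them when it finishes.
 */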
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
        /* Disable BDMA channel interrupts */
        iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
        if (bdma_chan->active)
                tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
        struct tsi721_bdma_chan *bdma_chan = ptr;

        if (bdma_chan->active)
                tasklet_hi_schedule(&bdma_chan->tasklet);
        return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/*
 * Pass the updated descriptor write count to the hardware to start the
 * transfer. Must be called with the channel spinlock held.
 */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
        if (!tsi721_dma_is_idle(bdma_chan)) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d Attempt to start non-idle channel",
                        bdma_chan->id);
                return;
        }

        if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d Attempt to start DMA with no BDs ready %d",
                        bdma_chan->id, task_pid_nr(current));
                return;
        }

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
                  bdma_chan->id, bdma_chan->wr_count_next,
                  task_pid_nr(current));

        iowrite32(bdma_chan->wr_count_next,
                bdma_chan->regs + TSI721_DMAC_DWRCNT);
        ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

        bdma_chan->wr_count = bdma_chan->wr_count_next;
}

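/*
 * Initialize a DTYPE1 hardware buffer descriptor from the transaction
 * descriptor and the current scatterlist entry: request type, destination
 * ID, RapidIO address and local buffer pointer. The byte count is
 * finalized separately by tsi721_desc_fill_end().
 */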
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
                      struct tsi721_dma_desc *bd_ptr,
                      struct scatterlist *sg, u32 sys_size)
{
        u64 rio_addr;

        if (!bd_ptr)
                return -EINVAL;

        /* Initialize DMA descriptor */
        bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
                                      (desc->rtype << 19) | desc->destid);
        bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
                                     (sys_size << 26));
        rio_addr = (desc->rio_addr >> 2) |
                                ((u64)(desc->rio_addr_u & 0x3) << 62);
        bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
        bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
        bd_ptr->t1.bufptr_lo = cpu_to_le32(
                                        (u64)sg_dma_address(sg) & 0xffffffff);
        bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
        bd_ptr->t1.s_dist = 0;
        bd_ptr->t1.s_size = 0;

        return 0;
}

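/*
 * Finalize a hardware buffer descriptor: set the total byte count and,
 * if requested, the "interrupt on completion" flag.
 */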
static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
        if (!bd_ptr)
                return -EINVAL;

        /* Update DMA descriptor */
        if (interrupt)
                bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
        bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

        return 0;
}

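/*
 * Return a failed transaction descriptor to the channel free list and
 * invoke its completion callback, if one was set.
 */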
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
                              struct tsi721_tx_desc *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback = txd->callback;
        void *param = txd->callback_param;

        list_move(&desc->desc_node, &bdma_chan->free_list);

        if (callback)
                callback(param);
}

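/*
 * Scan the descriptor status FIFO for completion messages written by the
 * hardware, clear the consumed entries and advance the FIFO read pointer.
 */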
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
        u32 srd_ptr;
        u64 *sts_ptr;
        int i, j;

        /* Check and clear descriptor status FIFO entries */
        srd_ptr = bdma_chan->sts_rdptr;
        sts_ptr = bdma_chan->sts_base;
        j = srd_ptr * 8;
        while (sts_ptr[j]) {
                for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
                        sts_ptr[j] = 0;

                ++srd_ptr;
                srd_ptr %= bdma_chan->sts_size;
                j = srd_ptr * 8;
        }

        iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
        bdma_chan->sts_rdptr = srd_ptr;
}

/*
 * Fill the channel's hardware buffer descriptors from the transaction's
 * scatterlist, merging physically contiguous entries and stopping early
 * if the BD ring fills up (unprocessed entries stay in the transaction
 * descriptor for a later pass).
 * Must be called with the channel spinlock held.
 */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
        struct dma_chan *dchan = desc->txd.chan;
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        u32 sys_size;
        u64 rio_addr;
        dma_addr_t next_addr;
        u32 bcount;
        struct scatterlist *sg;
        unsigned int i;
        int err = 0;
        struct tsi721_dma_desc *bd_ptr = NULL;
        u32 idx, rd_idx;
        u32 add_count = 0;
        struct device *ch_dev = &dchan->dev->device;

        if (!tsi721_dma_is_idle(bdma_chan)) {
                tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
                        bdma_chan->id);
                return -EIO;
        }

        /*
         * Fill DMA channel's hardware buffer descriptors.
         * (NOTE: RapidIO destination address is limited to 64 bits for now)
         */
        rio_addr = desc->rio_addr;
        next_addr = -1;
        bcount = 0;
        sys_size = dma_to_mport(dchan->device)->sys_size;

        rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
        rd_idx %= (bdma_chan->bd_num + 1);

        idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
        if (idx == bdma_chan->bd_num) {
                /* wrap around link descriptor */
                idx = 0;
                add_count++;
        }

        tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
                  bdma_chan->id, rd_idx, idx);

        for_each_sg(desc->sg, sg, desc->sg_len, i) {

                tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
                        bdma_chan->id, i, desc->sg_len,
                        (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

                if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
                        tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
                                bdma_chan->id, i);
                        err = -EINVAL;
                        break;
                }

                /*
                 * If this sg entry forms contiguous block with previous one,
                 * try to merge it into existing DMA descriptor
                 */
                if (next_addr == sg_dma_address(sg) &&
                    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
                        /* Adjust byte count of the descriptor */
                        bcount += sg_dma_len(sg);
                        goto entry_done;
                } else if (next_addr != -1) {
                        /* Finalize descriptor using total byte count value */
                        tsi721_desc_fill_end(bd_ptr, bcount, 0);
                        tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
                                  bdma_chan->id, bcount);
                }

                desc->rio_addr = rio_addr;

                if (i && idx == rd_idx) {
                        tsi_debug(DMAV, ch_dev,
                                  "DMAC%d HW descriptor ring is full @ %d",
                                  bdma_chan->id, i);
                        desc->sg = sg;
                        desc->sg_len -= i;
                        break;
                }

                bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
                err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
                if (err) {
                        tsi_err(ch_dev, "Failed to build desc: err=%d", err);
                        break;
                }

                tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
                          bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

                next_addr = sg_dma_address(sg);
                bcount = sg_dma_len(sg);

                add_count++;
                if (++idx == bdma_chan->bd_num) {
                        /* wrap around link descriptor */
                        idx = 0;
                        add_count++;
                }

entry_done:
                if (sg_is_last(sg)) {
                        tsi721_desc_fill_end(bd_ptr, bcount, 0);
                        tsi_debug(DMAV, ch_dev,
                                  "DMAC%d last desc final len: %d",
                                  bdma_chan->id, bcount);
                        desc->sg_len = 0;
                } else {
                        rio_addr += sg_dma_len(sg);
                        next_addr += sg_dma_len(sg);
                }
        }

        if (!err)
                bdma_chan->wr_count_next += add_count;

        return err;
}

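/*
 * Start the next transaction on an idle channel. If @desc is NULL and no
 * transaction is active, the first entry of the pending queue is promoted
 * to active and submitted to the hardware.
 * Must be called with the channel spinlock held.
 */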
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
                                struct tsi721_tx_desc *desc)
{
        int err;

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

        if (!tsi721_dma_is_idle(bdma_chan))
                return;

        /*
         * If there is no data transfer in progress, fetch new descriptor from
         * the pending queue.
         */
        if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
                desc = list_first_entry(&bdma_chan->queue,
                                        struct tsi721_tx_desc, desc_node);
                list_del_init(&desc->desc_node);
                bdma_chan->active_tx = desc;
        }

        if (desc) {
                err = tsi721_submit_sg(desc);
                if (!err)
                        tsi721_start_dma(bdma_chan);
                else {
                        tsi721_dma_tx_err(bdma_chan, desc);
                        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                                "DMAC%d ERR: tsi721_submit_sg failed with err=%d",
                                bdma_chan->id, err);
                }
        }

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
                  bdma_chan->id);
}

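/*
 * Channel tasklet: services the interrupt status reported by the hardware.
 * On an abort error the channel is re-initialized and the active
 * transaction completes with DMA_ERROR; on normal completion the active
 * transaction completes (invoking the client callback when
 * DMA_PREP_INTERRUPT was set) and the pending queue is advanced. Channel
 * interrupts are re-enabled on exit.
 */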
static void tsi721_dma_tasklet(unsigned long data)
{
        struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
        u32 dmac_int, dmac_sts;

        dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
                  bdma_chan->id, dmac_int);
        /* Clear channel interrupts */
        iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

        if (dmac_int & TSI721_DMAC_INT_ERR) {
                int i = 10000;
                struct tsi721_tx_desc *desc;

                desc = bdma_chan->active_tx;
                dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
                        bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

                /* Re-initialize DMA channel if possible */

                if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
                        goto err_out;

                tsi721_clr_stat(bdma_chan);

                spin_lock(&bdma_chan->lock);

                /* Put DMA channel into init state */
                iowrite32(TSI721_DMAC_CTL_INIT,
                          bdma_chan->regs + TSI721_DMAC_CTL);
                do {
                        udelay(1);
                        dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
                        i--;
                } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

                if (dmac_sts & TSI721_DMAC_STS_ABORT) {
                        tsi_err(&bdma_chan->dchan.dev->device,
                                "Failed to re-initiate DMAC%d", bdma_chan->id);
                        spin_unlock(&bdma_chan->lock);
                        goto err_out;
                }

                /* Setup DMA descriptor pointers */
                iowrite32(((u64)bdma_chan->bd_phys >> 32),
                        bdma_chan->regs + TSI721_DMAC_DPTRH);
                iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
                        bdma_chan->regs + TSI721_DMAC_DPTRL);

                /* Setup descriptor status FIFO */
                iowrite32(((u64)bdma_chan->sts_phys >> 32),
                        bdma_chan->regs + TSI721_DMAC_DSBH);
                iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
                        bdma_chan->regs + TSI721_DMAC_DSBL);
                iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
                        bdma_chan->regs + TSI721_DMAC_DSSZ);

                /* Clear interrupt bits */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);

                ioread32(bdma_chan->regs + TSI721_DMAC_INT);

                bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
                bdma_chan->sts_rdptr = 0;
                udelay(10);

                desc = bdma_chan->active_tx;
                desc->status = DMA_ERROR;
                dma_cookie_complete(&desc->txd);
                list_add(&desc->desc_node, &bdma_chan->free_list);
                bdma_chan->active_tx = NULL;
                if (bdma_chan->active)
                        tsi721_advance_work(bdma_chan, NULL);
                spin_unlock(&bdma_chan->lock);
        }

        if (dmac_int & TSI721_DMAC_INT_STFULL) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d descriptor status FIFO is full",
                        bdma_chan->id);
        }

        if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
                struct tsi721_tx_desc *desc;

                tsi721_clr_stat(bdma_chan);
                spin_lock(&bdma_chan->lock);
                desc = bdma_chan->active_tx;

                if (desc->sg_len == 0) {
                        dma_async_tx_callback callback = NULL;
                        void *param = NULL;

                        desc->status = DMA_COMPLETE;
                        dma_cookie_complete(&desc->txd);
                        if (desc->txd.flags & DMA_PREP_INTERRUPT) {
                                callback = desc->txd.callback;
                                param = desc->txd.callback_param;
                        }
                        list_add(&desc->desc_node, &bdma_chan->free_list);
                        bdma_chan->active_tx = NULL;
                        if (bdma_chan->active)
                                tsi721_advance_work(bdma_chan, NULL);
                        spin_unlock(&bdma_chan->lock);
                        if (callback)
                                callback(param);
                } else {
                        if (bdma_chan->active)
                                tsi721_advance_work(bdma_chan,
                                                    bdma_chan->active_tx);
                        spin_unlock(&bdma_chan->lock);
                }
        }
err_out:
        /* Re-Enable BDMA channel interrupts */
        iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

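/*
 * DMA engine tx_submit callback: assigns a cookie to the prepared
 * transaction and appends it to the channel's pending queue, starting it
 * immediately if the channel is idle.
 */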
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
        dma_cookie_t cookie;

        /* Check if the descriptor is detached from any lists */
        if (!list_empty(&desc->desc_node)) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d wrong state of descriptor %p",
                        bdma_chan->id, txd);
                return -EIO;
        }

        spin_lock_bh(&bdma_chan->lock);

        if (!bdma_chan->active) {
                spin_unlock_bh(&bdma_chan->lock);
                return -ENODEV;
        }

        cookie = dma_cookie_assign(txd);
        desc->status = DMA_IN_PROGRESS;
        list_add_tail(&desc->desc_node, &bdma_chan->queue);
        tsi721_advance_work(bdma_chan, NULL);

        spin_unlock_bh(&bdma_chan->lock);
        return cookie;
}

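/*
 * DMA engine device_alloc_chan_resources callback: initializes the BDMA
 * channel hardware and allocates the software queue of transaction
 * descriptors (dma_txqueue_sz entries).
 * Returns the number of allocated descriptors or a negative error code.
 */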
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc;
        int i;

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        if (bdma_chan->bd_base)
                return dma_txqueue_sz;

        /* Initialize BDMA channel */
        if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
                tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
                        bdma_chan->id);
                return -ENODEV;
        }

        /* Allocate queue of transaction descriptors */
        desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
                        GFP_ATOMIC);
        if (!desc) {
                tsi721_bdma_ch_free(bdma_chan);
                return -ENOMEM;
        }

        bdma_chan->tx_desc = desc;

        for (i = 0; i < dma_txqueue_sz; i++) {
                dma_async_tx_descriptor_init(&desc[i].txd, dchan);
                desc[i].txd.tx_submit = tsi721_tx_submit;
                desc[i].txd.flags = DMA_CTRL_ACK;
                list_add(&desc[i].desc_node, &bdma_chan->free_list);
        }

        dma_cookie_init(dchan);

        bdma_chan->active = true;
        tsi721_bdma_interrupt_enable(bdma_chan, 1);

        return dma_txqueue_sz;
}

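/*
 * Wait for any in-flight interrupt handler associated with this channel
 * to finish: both per-channel MSI-X vectors in MSI-X mode, otherwise the
 * shared legacy/MSI interrupt line.
 */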
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
        struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                                           bdma_chan->id].vector);
                synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
                                           bdma_chan->id].vector);
        } else
#endif
        synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        if (!bdma_chan->bd_base)
                return;

        tsi721_bdma_interrupt_enable(bdma_chan, 0);
        bdma_chan->active = false;
        tsi721_sync_dma_irq(bdma_chan);
        tasklet_kill(&bdma_chan->tasklet);
        INIT_LIST_HEAD(&bdma_chan->free_list);
        kfree(bdma_chan->tx_desc);
        tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                                 struct dma_tx_state *txstate)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        enum dma_status status;

        spin_lock_bh(&bdma_chan->lock);
        status = dma_cookie_status(dchan, cookie, txstate);
        spin_unlock_bh(&bdma_chan->lock);
        return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        spin_lock_bh(&bdma_chan->lock);
        if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
                tsi721_advance_work(bdma_chan, NULL);
        }
        spin_unlock_bh(&bdma_chan->lock);
}

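/*
 * DMA engine device_prep_slave_sg callback: maps the DMA transfer
 * direction and the RapidIO-specific extension data (struct rio_dma_ext
 * passed via @tinfo) onto a free transaction descriptor.
 * Returns a prepared descriptor, or an ERR_PTR() value on failure.
 */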
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
                        struct scatterlist *sgl, unsigned int sg_len,
                        enum dma_transfer_direction dir, unsigned long flags,
                        void *tinfo)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc;
        struct rio_dma_ext *rext = tinfo;
        enum dma_rtype rtype;
        struct dma_async_tx_descriptor *txd = NULL;

        if (!sgl || !sg_len) {
                tsi_err(&dchan->dev->device, "DMAC%d No SG list",
                        bdma_chan->id);
                return ERR_PTR(-EINVAL);
        }

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
                  (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

        if (dir == DMA_DEV_TO_MEM)
                rtype = NREAD;
        else if (dir == DMA_MEM_TO_DEV) {
                switch (rext->wr_type) {
                case RDW_ALL_NWRITE:
                        rtype = ALL_NWRITE;
                        break;
                case RDW_ALL_NWRITE_R:
                        rtype = ALL_NWRITE_R;
                        break;
                case RDW_LAST_NWRITE_R:
                default:
                        rtype = LAST_NWRITE_R;
                        break;
                }
        } else {
                tsi_err(&dchan->dev->device,
                        "DMAC%d Unsupported DMA direction option",
                        bdma_chan->id);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&bdma_chan->lock);

        if (!list_empty(&bdma_chan->free_list)) {
                desc = list_first_entry(&bdma_chan->free_list,
                                struct tsi721_tx_desc, desc_node);
                list_del_init(&desc->desc_node);
                desc->destid = rext->destid;
                desc->rio_addr = rext->rio_addr;
                desc->rio_addr_u = 0;
                desc->rtype = rtype;
                desc->sg_len    = sg_len;
                desc->sg        = sgl;
                txd             = &desc->txd;
                txd->flags      = flags;
        }

        spin_unlock_bh(&bdma_chan->lock);

        if (!txd) {
                tsi_debug(DMA, &dchan->dev->device,
                          "DMAC%d free TXD is not available", bdma_chan->id);
                return ERR_PTR(-EBUSY);
        }

        return txd;
}

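/*
 * DMA engine device_terminate_all callback: deactivates the channel,
 * waits for it to become idle, then fails the active and all queued
 * transactions.
 */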
static int tsi721_terminate_all(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc, *_d;
        LIST_HEAD(list);

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        spin_lock_bh(&bdma_chan->lock);

        bdma_chan->active = false;

        while (!tsi721_dma_is_idle(bdma_chan)) {

                udelay(5);
#if (0)
                /* make sure to stop the transfer */
                iowrite32(TSI721_DMAC_CTL_SUSP,
                          bdma_chan->regs + TSI721_DMAC_CTL);

                /* Wait until DMA channel stops */
                do {
                        dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
                } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
        }

        if (bdma_chan->active_tx)
                list_add(&bdma_chan->active_tx->desc_node, &list);
        list_splice_init(&bdma_chan->queue, &list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                tsi721_dma_tx_err(bdma_chan, desc);

        spin_unlock_bh(&bdma_chan->lock);

        return 0;
}

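/*
 * Suspend an active BDMA channel and busy-wait (up to ~100 ms) until the
 * current transfer stops.
 */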
static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
        if (!bdma_chan->active)
                return;
        spin_lock_bh(&bdma_chan->lock);
        if (!tsi721_dma_is_idle(bdma_chan)) {
                int timeout = 100000;

                /* stop the transfer in progress */
                iowrite32(TSI721_DMAC_CTL_SUSP,
                          bdma_chan->regs + TSI721_DMAC_CTL);

                /* Wait until DMA channel stops */
                while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
                        udelay(1);
        }

        spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
        int i;

        for (i = 0; i < TSI721_DMA_MAXCH; i++) {
                if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
                        tsi721_dma_stop(&priv->bdma[i]);
        }
}

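/*
 * Register the Tsi721 BDMA channels (excluding the channel reserved for
 * maintenance transactions and any masked off by the dma_sel module
 * parameter) with the kernel DMA engine framework.
 */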
int tsi721_register_dma(struct tsi721_device *priv)
{
        int i;
        int nr_channels = 0;
        int err;
        struct rio_mport *mport = &priv->mport;

        INIT_LIST_HEAD(&mport->dma.channels);

        for (i = 0; i < TSI721_DMA_MAXCH; i++) {
                struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

                if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
                        continue;

                bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

                bdma_chan->dchan.device = &mport->dma;
                bdma_chan->dchan.cookie = 1;
                bdma_chan->dchan.chan_id = i;
                bdma_chan->id = i;
                bdma_chan->active = false;

                spin_lock_init(&bdma_chan->lock);

                bdma_chan->active_tx = NULL;
                INIT_LIST_HEAD(&bdma_chan->queue);
                INIT_LIST_HEAD(&bdma_chan->free_list);

                tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
                             (unsigned long)bdma_chan);
                list_add_tail(&bdma_chan->dchan.device_node,
                              &mport->dma.channels);
                nr_channels++;
        }

        mport->dma.chancnt = nr_channels;
        dma_cap_zero(mport->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

        mport->dma.dev = &priv->pdev->dev;
        mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
        mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
        mport->dma.device_tx_status = tsi721_tx_status;
        mport->dma.device_issue_pending = tsi721_issue_pending;
        mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
        mport->dma.device_terminate_all = tsi721_terminate_all;

        err = dma_async_device_register(&mport->dma);
        if (err)
                tsi_err(&priv->pdev->dev, "Failed to register DMA device");

        return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
        struct rio_mport *mport = &priv->mport;
        struct dma_chan *chan, *_c;
        struct tsi721_bdma_chan *bdma_chan;

        tsi721_dma_stop_all(priv);
        dma_async_device_unregister(&mport->dma);

        list_for_each_entry_safe(chan, _c, &mport->dma.channels,
                                        device_node) {
                bdma_chan = to_tsi721_chan(chan);
                if (bdma_chan->active) {
                        tsi721_bdma_interrupt_enable(bdma_chan, 0);
                        bdma_chan->active = false;
                        tsi721_sync_dma_irq(bdma_chan);
                        tasklet_kill(&bdma_chan->tasklet);
                        INIT_LIST_HEAD(&bdma_chan->free_list);
                        kfree(bdma_chan->tx_desc);
                        tsi721_bdma_ch_free(bdma_chan);
                }

                list_del(&chan->device_node);
        }
}