linux/drivers/rapidio/devices/tsi721_dma.c
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
                 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
                 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
                 "DMA Channel Selection Mask (default: 0x7f = all)");

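/*
 * Note on the parameters above: dma_desc_per_channel sets the size of each
 * channel's hardware buffer descriptor ring, dma_txqueue_sz sets the number
 * of software transaction descriptors per channel, and dma_sel is a bitmask
 * of BDMA channels this driver may use (the maintenance channel is always
 * excluded in tsi721_register_dma() below). Assuming the usual tsi721_mport
 * module build, a restricted channel selection could look like:
 *
 *      modprobe tsi721_mport dma_sel=0x0f
 */
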
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
        return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
        return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct tsi721_tx_desc, txd);
}

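/*
 * tsi721_bdma_ch_init - initialize a BDMA channel's hardware resources.
 *
 * Allocates the buffer descriptor (BD) ring with one extra DTYPE3 "link"
 * descriptor that points back to the start of the ring, allocates the
 * descriptor status FIFO (rounded up to a power of two, at least
 * TSI721_DMA_MINSTSSZ entries), programs both base addresses into the
 * channel registers, hooks up the per-channel MSI-X vectors when MSI-X
 * is in use, and toggles the channel into its initial state.
 */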
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
        struct tsi721_dma_desc *bd_ptr;
        struct device *dev = bdma_chan->dchan.device->dev;
        u64             *sts_ptr;
        dma_addr_t      bd_phys;
        dma_addr_t      sts_phys;
        int             sts_size;
#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

        /*
         * Allocate space for DMA descriptors
         * (add an extra element for link descriptor)
         */
        bd_ptr = dma_zalloc_coherent(dev,
                                (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                                &bd_phys, GFP_ATOMIC);
        if (!bd_ptr)
                return -ENOMEM;

        bdma_chan->bd_num = bd_num;
        bdma_chan->bd_phys = bd_phys;
        bdma_chan->bd_base = bd_ptr;

        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                  "DMAC%d descriptors @ %p (phys = %pad)",
                  bdma_chan->id, bd_ptr, &bd_phys);

        /* Allocate space for descriptor status FIFO */
        sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
                                        (bd_num + 1) : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
        sts_ptr = dma_zalloc_coherent(dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_ATOMIC);
        if (!sts_ptr) {
                /* Free space allocated for DMA descriptors */
                dma_free_coherent(dev,
                                  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                                  bd_ptr, bd_phys);
                bdma_chan->bd_base = NULL;
                return -ENOMEM;
        }

        bdma_chan->sts_phys = sts_phys;
        bdma_chan->sts_base = sts_ptr;
        bdma_chan->sts_size = sts_size;

        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
                bdma_chan->id, sts_ptr, &sts_phys, sts_size);

        /* Initialize DMA descriptors ring using added link descriptor */
        bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
        bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
                                                 TSI721_DMAC_DPTRL_MASK);
        bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

        /* Setup DMA descriptor pointers */
        iowrite32(((u64)bd_phys >> 32),
                bdma_chan->regs + TSI721_DMAC_DPTRH);
        iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
                bdma_chan->regs + TSI721_DMAC_DPTRL);

        /* Setup descriptor status FIFO */
        iowrite32(((u64)sts_phys >> 32),
                bdma_chan->regs + TSI721_DMAC_DSBH);
        iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
                bdma_chan->regs + TSI721_DMAC_DSBL);
        iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
                bdma_chan->regs + TSI721_DMAC_DSSZ);

        /* Clear interrupt bits */
        iowrite32(TSI721_DMAC_INT_ALL,
                bdma_chan->regs + TSI721_DMAC_INT);

        ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
        /* Request interrupt service if we are in MSI-X mode */
        if (priv->flags & TSI721_USING_MSIX) {
                int rc, idx;

                idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

                rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
                                 priv->msix[idx].irq_name, (void *)bdma_chan);

                if (rc) {
                        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                                  "Unable to get MSI-X for DMAC%d-DONE",
                                  bdma_chan->id);
                        goto err_out;
                }

                idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

                rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
                                priv->msix[idx].irq_name, (void *)bdma_chan);

                if (rc) {
                        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                                  "Unable to get MSI-X for DMAC%d-INT",
                                  bdma_chan->id);
                        free_irq(
                                priv->msix[TSI721_VECT_DMA0_DONE +
                                            bdma_chan->id].vector,
                                (void *)bdma_chan);
                }

err_out:
                if (rc) {
                        /* Free space allocated for DMA descriptors */
                        dma_free_coherent(dev,
                                (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                                bd_ptr, bd_phys);
                        bdma_chan->bd_base = NULL;

                        /* Free space allocated for status descriptors */
                        dma_free_coherent(dev,
                                sts_size * sizeof(struct tsi721_dma_sts),
                                sts_ptr, sts_phys);
                        bdma_chan->sts_base = NULL;

                        return -EIO;
                }
        }
#endif /* CONFIG_PCI_MSI */

        /* Toggle DMA channel initialization */
        iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
        ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
        bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
        bdma_chan->sts_rdptr = 0;
        udelay(10);

        return 0;
}

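/*
 * tsi721_bdma_ch_free - release a BDMA channel's hardware resources.
 *
 * Refuses to tear the channel down (-EFAULT) while the engine still
 * reports RUN; otherwise returns the channel to init state, releases the
 * MSI-X vectors if they were requested, and frees the BD ring and the
 * status FIFO.
 */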
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
        u32 ch_stat;
#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

        if (!bdma_chan->bd_base)
                return 0;

        /* Check if DMA channel still running */
        ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        if (ch_stat & TSI721_DMAC_STS_RUN)
                return -EFAULT;

        /* Put DMA channel into init state */
        iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                                    bdma_chan->id].vector, (void *)bdma_chan);
                free_irq(priv->msix[TSI721_VECT_DMA0_INT +
                                    bdma_chan->id].vector, (void *)bdma_chan);
        }
#endif /* CONFIG_PCI_MSI */

        /* Free space allocated for DMA descriptors */
        dma_free_coherent(bdma_chan->dchan.device->dev,
                (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
                bdma_chan->bd_base, bdma_chan->bd_phys);
        bdma_chan->bd_base = NULL;

        /* Free space allocated for status FIFO */
        dma_free_coherent(bdma_chan->dchan.device->dev,
                bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
                bdma_chan->sts_base, bdma_chan->sts_phys);
        bdma_chan->sts_base = NULL;
        return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
        if (enable) {
                /* Clear pending BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);
                ioread32(bdma_chan->regs + TSI721_DMAC_INT);
                /* Enable BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INTE);
        } else {
                /* Disable BDMA channel interrupts */
                iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
                /* Clear pending BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);
        }
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
        u32 sts;

        sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

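/*
 * tsi721_bdma_handler - channel event hook called from the top-level
 * Tsi721 interrupt handler (see tsi721.c). Masks the channel's interrupts
 * and defers the real work to the channel tasklet, which re-enables them
 * when it is done.
 */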
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
        /* Disable BDMA channel interrupts */
        iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
        if (bdma_chan->active)
                tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
        struct tsi721_bdma_chan *bdma_chan = ptr;

        if (bdma_chan->active)
                tasklet_hi_schedule(&bdma_chan->tasklet);
        return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
        if (!tsi721_dma_is_idle(bdma_chan)) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d Attempt to start non-idle channel",
                        bdma_chan->id);
                return;
        }

        if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d Attempt to start DMA with no BDs ready %d",
                        bdma_chan->id, task_pid_nr(current));
                return;
        }

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
                  bdma_chan->id, bdma_chan->wr_count_next,
                  task_pid_nr(current));

        iowrite32(bdma_chan->wr_count_next,
                bdma_chan->regs + TSI721_DMAC_DWRCNT);
        ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

        bdma_chan->wr_count = bdma_chan->wr_count_next;
}

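/*
 * tsi721_desc_fill_init()/tsi721_desc_fill_end() split hardware descriptor
 * setup into two stages: _init writes the DTYPE1 header, RapidIO address
 * and local buffer pointer for the first SG entry of a descriptor, while
 * _end patches in the final byte count (and the optional interrupt-on-
 * finish flag) only after tsi721_submit_sg() has merged any physically
 * contiguous SG entries that follow.
 */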
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
                      struct tsi721_dma_desc *bd_ptr,
                      struct scatterlist *sg, u32 sys_size)
{
        u64 rio_addr;

        if (!bd_ptr)
                return -EINVAL;

        /* Initialize DMA descriptor */
        bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
                                      (desc->rtype << 19) | desc->destid);
        bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
                                     (sys_size << 26));
        rio_addr = (desc->rio_addr >> 2) |
                                ((u64)(desc->rio_addr_u & 0x3) << 62);
        bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
        bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
        bd_ptr->t1.bufptr_lo = cpu_to_le32(
                                        (u64)sg_dma_address(sg) & 0xffffffff);
        bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
        bd_ptr->t1.s_dist = 0;
        bd_ptr->t1.s_size = 0;

        return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
        if (!bd_ptr)
                return -EINVAL;

        /* Update DMA descriptor */
        if (interrupt)
                bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
        bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

        return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
                              struct tsi721_tx_desc *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback = txd->callback;
        void *param = txd->callback_param;

        list_move(&desc->desc_node, &bdma_chan->free_list);

        if (callback)
                callback(param);
}

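/*
 * tsi721_clr_stat - sweep the descriptor status FIFO.
 *
 * The hardware appends completion status in blocks of eight 64-bit words;
 * starting from the software read pointer, zero out every non-empty block
 * and then publish the new read pointer back to the channel so the FIFO
 * entries can be reused.
 */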
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
        u32 srd_ptr;
        u64 *sts_ptr;
        int i, j;

        /* Check and clear descriptor status FIFO entries */
        srd_ptr = bdma_chan->sts_rdptr;
        sts_ptr = bdma_chan->sts_base;
        j = srd_ptr * 8;
        while (sts_ptr[j]) {
                for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
                        sts_ptr[j] = 0;

                ++srd_ptr;
                srd_ptr %= bdma_chan->sts_size;
                j = srd_ptr * 8;
        }

        iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
        bdma_chan->sts_rdptr = srd_ptr;
}

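/*
 * tsi721_submit_sg - translate a transaction's SG list into hardware BDs.
 *
 * Walks the SG list, merging entries that are physically contiguous (up to
 * TSI721_BDMA_MAX_BCOUNT per descriptor) and stopping early, with the
 * remaining SG state saved in @desc, when the write index catches up with
 * the hardware read index, i.e. when the BD ring is full. The extra link
 * descriptor at the end of the ring is skipped transparently.
 */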
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
        struct dma_chan *dchan = desc->txd.chan;
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        u32 sys_size;
        u64 rio_addr;
        dma_addr_t next_addr;
        u32 bcount;
        struct scatterlist *sg;
        unsigned int i;
        int err = 0;
        struct tsi721_dma_desc *bd_ptr = NULL;
        u32 idx, rd_idx;
        u32 add_count = 0;
        struct device *ch_dev = &dchan->dev->device;

        if (!tsi721_dma_is_idle(bdma_chan)) {
                tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
                        bdma_chan->id);
                return -EIO;
        }

        /*
         * Fill DMA channel's hardware buffer descriptors.
         * (NOTE: RapidIO destination address is limited to 64 bits for now)
         */
        rio_addr = desc->rio_addr;
        next_addr = -1;
        bcount = 0;
        sys_size = dma_to_mport(dchan->device)->sys_size;

        rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
        rd_idx %= (bdma_chan->bd_num + 1);

        idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
        if (idx == bdma_chan->bd_num) {
                /* wrap around link descriptor */
                idx = 0;
                add_count++;
        }

        tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
                  bdma_chan->id, rd_idx, idx);

        for_each_sg(desc->sg, sg, desc->sg_len, i) {

                tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
                        bdma_chan->id, i, desc->sg_len,
                        (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

                if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
                        tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
                                bdma_chan->id, i);
                        err = -EINVAL;
                        break;
                }

                /*
                 * If this sg entry forms a contiguous block with the previous
                 * one, try to merge it into the existing DMA descriptor.
                 */
                if (next_addr == sg_dma_address(sg) &&
                    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
                        /* Adjust byte count of the descriptor */
                        bcount += sg_dma_len(sg);
                        goto entry_done;
                } else if (next_addr != -1) {
                        /* Finalize descriptor using total byte count value */
                        tsi721_desc_fill_end(bd_ptr, bcount, 0);
                        tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
                                  bdma_chan->id, bcount);
                }

                desc->rio_addr = rio_addr;

                if (i && idx == rd_idx) {
                        tsi_debug(DMAV, ch_dev,
                                  "DMAC%d HW descriptor ring is full @ %d",
                                  bdma_chan->id, i);
                        desc->sg = sg;
                        desc->sg_len -= i;
                        break;
                }

                bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
                err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
                if (err) {
                        tsi_err(ch_dev, "Failed to build desc: err=%d", err);
                        break;
                }

                tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
                          bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

                next_addr = sg_dma_address(sg);
                bcount = sg_dma_len(sg);

                add_count++;
                if (++idx == bdma_chan->bd_num) {
                        /* wrap around link descriptor */
                        idx = 0;
                        add_count++;
                }

entry_done:
                if (sg_is_last(sg)) {
                        tsi721_desc_fill_end(bd_ptr, bcount, 0);
                        tsi_debug(DMAV, ch_dev,
                                  "DMAC%d last desc final len: %d",
                                  bdma_chan->id, bcount);
                        desc->sg_len = 0;
                } else {
                        rio_addr += sg_dma_len(sg);
                        next_addr += sg_dma_len(sg);
                }
        }

        if (!err)
                bdma_chan->wr_count_next += add_count;

        return err;
}

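/*
 * tsi721_advance_work - (re)start the channel if it is idle.
 *
 * Called with the channel spinlock held. If no transaction is active,
 * pull the next one from the pending queue; then push (more of) the
 * active transaction's SG list into the BD ring and start the engine.
 * On a submission error the transaction is failed via tsi721_dma_tx_err().
 */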
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
                                struct tsi721_tx_desc *desc)
{
        int err;

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

        if (!tsi721_dma_is_idle(bdma_chan))
                return;

        /*
         * If there is no data transfer in progress, fetch a new descriptor
         * from the pending queue.
         */
        if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
                desc = list_first_entry(&bdma_chan->queue,
                                        struct tsi721_tx_desc, desc_node);
                list_del_init((&desc->desc_node));
                bdma_chan->active_tx = desc;
        }

        if (desc) {
                err = tsi721_submit_sg(desc);
                if (!err)
                        tsi721_start_dma(bdma_chan);
                else {
                        tsi721_dma_tx_err(bdma_chan, desc);
                        tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                                "DMAC%d ERR: tsi721_submit_sg failed with err=%d",
                                bdma_chan->id, err);
                }
        }

        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
                  bdma_chan->id);
}

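/*
 * tsi721_dma_tasklet - deferred interrupt service for a BDMA channel.
 *
 * Reads and acknowledges the channel interrupt status, then handles three
 * cases: a channel error (attempt a full re-init of the BD ring and status
 * FIFO and fail the active transaction), a status FIFO overflow, and normal
 * descriptor completion (complete the transaction when its whole SG list
 * has been consumed, or feed the remainder back into the ring). Channel
 * interrupts, masked in tsi721_bdma_handler(), are re-enabled on exit.
 */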
static void tsi721_dma_tasklet(unsigned long data)
{
        struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
        u32 dmac_int, dmac_sts;

        dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
        tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
                  bdma_chan->id, dmac_int);
        /* Clear channel interrupts */
        iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

        if (dmac_int & TSI721_DMAC_INT_ERR) {
                int i = 10000;
                struct tsi721_tx_desc *desc;

                desc = bdma_chan->active_tx;
                dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
                        bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

                /* Re-initialize DMA channel if possible */

                if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
                        goto err_out;

                tsi721_clr_stat(bdma_chan);

                spin_lock(&bdma_chan->lock);

                /* Put DMA channel into init state */
                iowrite32(TSI721_DMAC_CTL_INIT,
                          bdma_chan->regs + TSI721_DMAC_CTL);
                do {
                        udelay(1);
                        dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
                        i--;
                } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

                if (dmac_sts & TSI721_DMAC_STS_ABORT) {
                        tsi_err(&bdma_chan->dchan.dev->device,
                                "Failed to re-initiate DMAC%d", bdma_chan->id);
                        spin_unlock(&bdma_chan->lock);
                        goto err_out;
                }

                /* Setup DMA descriptor pointers */
                iowrite32(((u64)bdma_chan->bd_phys >> 32),
                        bdma_chan->regs + TSI721_DMAC_DPTRH);
                iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
                        bdma_chan->regs + TSI721_DMAC_DPTRL);

                /* Setup descriptor status FIFO */
                iowrite32(((u64)bdma_chan->sts_phys >> 32),
                        bdma_chan->regs + TSI721_DMAC_DSBH);
                iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
                        bdma_chan->regs + TSI721_DMAC_DSBL);
                iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
                        bdma_chan->regs + TSI721_DMAC_DSSZ);

                /* Clear interrupt bits */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);

                ioread32(bdma_chan->regs + TSI721_DMAC_INT);

                bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
                bdma_chan->sts_rdptr = 0;
                udelay(10);

                desc = bdma_chan->active_tx;
                desc->status = DMA_ERROR;
                dma_cookie_complete(&desc->txd);
                list_add(&desc->desc_node, &bdma_chan->free_list);
                bdma_chan->active_tx = NULL;
                if (bdma_chan->active)
                        tsi721_advance_work(bdma_chan, NULL);
                spin_unlock(&bdma_chan->lock);
        }

        if (dmac_int & TSI721_DMAC_INT_STFULL) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d descriptor status FIFO is full",
                        bdma_chan->id);
        }

        if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
                struct tsi721_tx_desc *desc;

                tsi721_clr_stat(bdma_chan);
                spin_lock(&bdma_chan->lock);
                desc = bdma_chan->active_tx;

                if (desc->sg_len == 0) {
                        dma_async_tx_callback callback = NULL;
                        void *param = NULL;

                        desc->status = DMA_COMPLETE;
                        dma_cookie_complete(&desc->txd);
                        if (desc->txd.flags & DMA_PREP_INTERRUPT) {
                                callback = desc->txd.callback;
                                param = desc->txd.callback_param;
                        }
                        list_add(&desc->desc_node, &bdma_chan->free_list);
                        bdma_chan->active_tx = NULL;
                        if (bdma_chan->active)
                                tsi721_advance_work(bdma_chan, NULL);
                        spin_unlock(&bdma_chan->lock);
                        if (callback)
                                callback(param);
                } else {
                        if (bdma_chan->active)
                                tsi721_advance_work(bdma_chan,
                                                    bdma_chan->active_tx);
                        spin_unlock(&bdma_chan->lock);
                }
        }
err_out:
        /* Re-Enable BDMA channel interrupts */
        iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

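/*
 * tsi721_tx_submit - dmaengine .tx_submit hook.
 *
 * Assigns a cookie, queues the transaction on the channel's pending list
 * and immediately tries to advance work. A descriptor still linked into
 * a list, or submission to an inactive channel, is rejected.
 */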
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
        dma_cookie_t cookie;

        /* Check if the descriptor is detached from any lists */
        if (!list_empty(&desc->desc_node)) {
                tsi_err(&bdma_chan->dchan.dev->device,
                        "DMAC%d wrong state of descriptor %p",
                        bdma_chan->id, txd);
                return -EIO;
        }

        spin_lock_bh(&bdma_chan->lock);

        if (!bdma_chan->active) {
                spin_unlock_bh(&bdma_chan->lock);
                return -ENODEV;
        }

        cookie = dma_cookie_assign(txd);
        desc->status = DMA_IN_PROGRESS;
        list_add_tail(&desc->desc_node, &bdma_chan->queue);
        tsi721_advance_work(bdma_chan, NULL);

        spin_unlock_bh(&bdma_chan->lock);
        return cookie;
}

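/*
 * tsi721_alloc_chan_resources - dmaengine .device_alloc_chan_resources hook.
 *
 * Brings up the channel hardware (BD ring sized by dma_desc_per_channel),
 * places dma_txqueue_sz software transaction descriptors on the free list,
 * and enables channel interrupts. Returns the number of usable transaction
 * slots, as the dmaengine core expects.
 */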
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc;
        int i;

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        if (bdma_chan->bd_base)
                return dma_txqueue_sz;

        /* Initialize BDMA channel */
        if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
                tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
                        bdma_chan->id);
                return -ENODEV;
        }

        /* Allocate queue of transaction descriptors */
        desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
                        GFP_ATOMIC);
        if (!desc) {
                tsi721_bdma_ch_free(bdma_chan);
                return -ENOMEM;
        }

        bdma_chan->tx_desc = desc;

        for (i = 0; i < dma_txqueue_sz; i++) {
                dma_async_tx_descriptor_init(&desc[i].txd, dchan);
                desc[i].txd.tx_submit = tsi721_tx_submit;
                desc[i].txd.flags = DMA_CTRL_ACK;
                list_add(&desc[i].desc_node, &bdma_chan->free_list);
        }

        dma_cookie_init(dchan);

        bdma_chan->active = true;
        tsi721_bdma_interrupt_enable(bdma_chan, 1);

        return dma_txqueue_sz;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
        struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                                           bdma_chan->id].vector);
                synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
                                           bdma_chan->id].vector);
        } else
#endif
        synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        if (!bdma_chan->bd_base)
                return;

        tsi721_bdma_interrupt_enable(bdma_chan, 0);
        bdma_chan->active = false;
        tsi721_sync_dma_irq(bdma_chan);
        tasklet_kill(&bdma_chan->tasklet);
        INIT_LIST_HEAD(&bdma_chan->free_list);
        kfree(bdma_chan->tx_desc);
        tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                                 struct dma_tx_state *txstate)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        enum dma_status status;

        spin_lock_bh(&bdma_chan->lock);
        status = dma_cookie_status(dchan, cookie, txstate);
        spin_unlock_bh(&bdma_chan->lock);
        return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        spin_lock_bh(&bdma_chan->lock);
        if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
                tsi721_advance_work(bdma_chan, NULL);
        }
        spin_unlock_bh(&bdma_chan->lock);
}

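/*
 * tsi721_prep_rio_sg - dmaengine .device_prep_slave_sg hook.
 *
 * Clients reach this through the generic slave-SG entry point; the RapidIO
 * core's rio_dma_prep_xfer() (drivers/rapidio/rio.c) passes a struct
 * rio_dma_ext as the driver-private @tinfo argument, roughly along these
 * lines (an illustrative sketch, not copied from an actual caller):
 *
 *      struct rio_dma_ext rio_ext = {
 *              .destid   = destid,
 *              .rio_addr = raddr,
 *              .wr_type  = RDW_ALL_NWRITE,
 *      };
 *      txd = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
 *                                                DMA_MEM_TO_DEV, flags,
 *                                                &rio_ext);
 *
 * A free transaction descriptor is taken from the channel's free list;
 * ERR_PTR(-EBUSY) is returned when none is available.
 */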
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
                        struct scatterlist *sgl, unsigned int sg_len,
                        enum dma_transfer_direction dir, unsigned long flags,
                        void *tinfo)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc;
        struct rio_dma_ext *rext = tinfo;
        enum dma_rtype rtype;
        struct dma_async_tx_descriptor *txd = NULL;

        if (!sgl || !sg_len) {
                tsi_err(&dchan->dev->device, "DMAC%d No SG list",
                        bdma_chan->id);
                return ERR_PTR(-EINVAL);
        }

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
                  (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

        if (dir == DMA_DEV_TO_MEM)
                rtype = NREAD;
        else if (dir == DMA_MEM_TO_DEV) {
                switch (rext->wr_type) {
                case RDW_ALL_NWRITE:
                        rtype = ALL_NWRITE;
                        break;
                case RDW_ALL_NWRITE_R:
                        rtype = ALL_NWRITE_R;
                        break;
                case RDW_LAST_NWRITE_R:
                default:
                        rtype = LAST_NWRITE_R;
                        break;
                }
        } else {
                tsi_err(&dchan->dev->device,
                        "DMAC%d Unsupported DMA direction option",
                        bdma_chan->id);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&bdma_chan->lock);

        if (!list_empty(&bdma_chan->free_list)) {
                desc = list_first_entry(&bdma_chan->free_list,
                                struct tsi721_tx_desc, desc_node);
                list_del_init(&desc->desc_node);
                desc->destid = rext->destid;
                desc->rio_addr = rext->rio_addr;
                desc->rio_addr_u = 0;
                desc->rtype = rtype;
                desc->sg_len    = sg_len;
                desc->sg        = sgl;
                txd             = &desc->txd;
                txd->flags      = flags;
        }

        spin_unlock_bh(&bdma_chan->lock);

        if (!txd) {
                tsi_debug(DMA, &dchan->dev->device,
                          "DMAC%d free TXD is not available", bdma_chan->id);
                return ERR_PTR(-EBUSY);
        }

        return txd;
}

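/*
 * tsi721_terminate_all - dmaengine .device_terminate_all hook.
 *
 * Marks the channel inactive, busy-waits for the engine to go idle, then
 * fails the active transaction and everything still on the pending queue
 * via tsi721_dma_tx_err(). The disabled #if (0) block below is an unused
 * alternative that would suspend the channel instead of polling.
 */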
static int tsi721_terminate_all(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc, *_d;
        LIST_HEAD(list);

        tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

        spin_lock_bh(&bdma_chan->lock);

        bdma_chan->active = false;

        while (!tsi721_dma_is_idle(bdma_chan)) {

                udelay(5);
#if (0)
                /* make sure to stop the transfer */
                iowrite32(TSI721_DMAC_CTL_SUSP,
                          bdma_chan->regs + TSI721_DMAC_CTL);

                /* Wait until DMA channel stops */
                do {
                        dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
                } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
        }

        if (bdma_chan->active_tx)
                list_add(&bdma_chan->active_tx->desc_node, &list);
        list_splice_init(&bdma_chan->queue, &list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                tsi721_dma_tx_err(bdma_chan, desc);

        spin_unlock_bh(&bdma_chan->lock);

        return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
        if (!bdma_chan->active)
                return;
        spin_lock_bh(&bdma_chan->lock);
        if (!tsi721_dma_is_idle(bdma_chan)) {
                int timeout = 100000;

                /* stop the transfer in progress */
                iowrite32(TSI721_DMAC_CTL_SUSP,
                          bdma_chan->regs + TSI721_DMAC_CTL);

                /* Wait until DMA channel stops */
                while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
                        udelay(1);
        }

        spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
        int i;

        for (i = 0; i < TSI721_DMA_MAXCH; i++) {
                if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
                        tsi721_dma_stop(&priv->bdma[i]);
        }
}

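/*
 * tsi721_register_dma - register the Tsi721 BDMA channels with dmaengine.
 *
 * Sets up every channel allowed by dma_sel (always skipping the channel
 * reserved for maintenance transactions), advertises DMA_PRIVATE and
 * DMA_SLAVE capabilities, wires up the dmaengine callbacks implemented
 * above, and registers the resulting dma_device.
 */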
int tsi721_register_dma(struct tsi721_device *priv)
{
        int i;
        int nr_channels = 0;
        int err;
        struct rio_mport *mport = &priv->mport;

        INIT_LIST_HEAD(&mport->dma.channels);

        for (i = 0; i < TSI721_DMA_MAXCH; i++) {
                struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

                if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
                        continue;

                bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

                bdma_chan->dchan.device = &mport->dma;
                bdma_chan->dchan.cookie = 1;
                bdma_chan->dchan.chan_id = i;
                bdma_chan->id = i;
                bdma_chan->active = false;

                spin_lock_init(&bdma_chan->lock);

                bdma_chan->active_tx = NULL;
                INIT_LIST_HEAD(&bdma_chan->queue);
                INIT_LIST_HEAD(&bdma_chan->free_list);

                tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
                             (unsigned long)bdma_chan);
                list_add_tail(&bdma_chan->dchan.device_node,
                              &mport->dma.channels);
                nr_channels++;
        }

        mport->dma.chancnt = nr_channels;
        dma_cap_zero(mport->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

        mport->dma.dev = &priv->pdev->dev;
        mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
        mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
        mport->dma.device_tx_status = tsi721_tx_status;
        mport->dma.device_issue_pending = tsi721_issue_pending;
        mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
        mport->dma.device_terminate_all = tsi721_terminate_all;

        err = dma_async_device_register(&mport->dma);
        if (err)
                tsi_err(&priv->pdev->dev, "Failed to register DMA device");

        return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
        struct rio_mport *mport = &priv->mport;
        struct dma_chan *chan, *_c;
        struct tsi721_bdma_chan *bdma_chan;

        tsi721_dma_stop_all(priv);
        dma_async_device_unregister(&mport->dma);

        list_for_each_entry_safe(chan, _c, &mport->dma.channels,
                                        device_node) {
                bdma_chan = to_tsi721_chan(chan);
                if (bdma_chan->active) {
                        tsi721_bdma_interrupt_enable(bdma_chan, 0);
                        bdma_chan->active = false;
                        tsi721_sync_dma_irq(bdma_chan);
                        tasklet_kill(&bdma_chan->tasklet);
                        INIT_LIST_HEAD(&bdma_chan->free_list);
                        kfree(bdma_chan->tx_desc);
                        tsi721_bdma_ch_free(bdma_chan);
                }

                list_del(&chan->device_node);
        }
}