linux/drivers/dma/plx_dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PLX ExpressLane PEX PCI Switch DMA Engine Driver
 * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
 * Copyright (c) 2019, GigaIO Networks, Inc
 */

#include "dmaengine.h"

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logan Gunthorpe");

#define PLX_REG_DESC_RING_ADDR                  0x214
#define PLX_REG_DESC_RING_ADDR_HI               0x218
#define PLX_REG_DESC_RING_NEXT_ADDR             0x21C
#define PLX_REG_DESC_RING_COUNT                 0x220
#define PLX_REG_DESC_RING_LAST_ADDR             0x224
#define PLX_REG_DESC_RING_LAST_SIZE             0x228
#define PLX_REG_PREF_LIMIT                      0x234
#define PLX_REG_CTRL                            0x238
#define PLX_REG_CTRL2                           0x23A
#define PLX_REG_INTR_CTRL                       0x23C
#define PLX_REG_INTR_STATUS                     0x23E

#define PLX_REG_PREF_LIMIT_PREF_FOUR            8

#define PLX_REG_CTRL_GRACEFUL_PAUSE             BIT(0)
#define PLX_REG_CTRL_ABORT                      BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN              BIT(2)
#define PLX_REG_CTRL_START                      BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE             BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK            (0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP          (1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP         (2 << 5)
#define PLX_REG_CTRL_DESC_INVALID               BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE        BIT(9)
#define PLX_REG_CTRL_ABORT_DONE                 BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE             BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS                BIT(30)

#define PLX_REG_CTRL_RESET_VAL  (PLX_REG_CTRL_DESC_INVALID | \
                                 PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
                                 PLX_REG_CTRL_ABORT_DONE | \
                                 PLX_REG_CTRL_IMM_PAUSE_DONE)

#define PLX_REG_CTRL_START_VAL  (PLX_REG_CTRL_WRITE_BACK_EN | \
                                 PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
                                 PLX_REG_CTRL_START | \
                                 PLX_REG_CTRL_RESET_VAL)

#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B         0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B        1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B        2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B        3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB         4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB         5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B          7

#define PLX_REG_INTR_CRTL_ERROR_EN              BIT(0)
#define PLX_REG_INTR_CRTL_INV_DESC_EN           BIT(1)
#define PLX_REG_INTR_CRTL_ABORT_DONE_EN         BIT(3)
#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN         BIT(4)
#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN     BIT(5)

#define PLX_REG_INTR_STATUS_ERROR               BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC            BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE           BIT(2)
#define PLX_REG_INTR_CRTL_ABORT_DONE            BIT(3)

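/*
 * Layout of one hardware descriptor in the off-chip ring.  The low 27 bits
 * of flags_and_size hold the transfer size; the top bits carry the valid and
 * interrupt flags on submission, and the write-back status once the hardware
 * has completed the descriptor.
 */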
struct plx_dma_hw_std_desc {
        __le32 flags_and_size;
        __le16 dst_addr_hi;
        __le16 src_addr_hi;
        __le32 dst_addr_lo;
        __le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK              0x7ffffff
#define PLX_DESC_FLAG_VALID             BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE     BIT(30)

#define PLX_DESC_WB_SUCCESS             BIT(30)
#define PLX_DESC_WB_RD_FAIL             BIT(29)
#define PLX_DESC_WB_WR_FAIL             BIT(28)

#define PLX_DMA_RING_COUNT              2048

struct plx_dma_desc {
        struct dma_async_tx_descriptor txd;
        struct plx_dma_hw_std_desc *hw;
        u32 orig_size;
};

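/*
 * Per-device state for the single DMA channel.  head and tail index both the
 * software descriptor ring (desc_ring) and the matching hardware ring
 * (hw_ring) and are protected by ring_lock.  pdev is an RCU-protected back
 * pointer that is cleared on device removal so in-flight operations can
 * detect that the PCI device is gone.
 */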
struct plx_dma_dev {
        struct dma_device dma_dev;
        struct dma_chan dma_chan;
        struct pci_dev __rcu *pdev;
        void __iomem *bar;
        struct tasklet_struct desc_task;

        spinlock_t ring_lock;
        bool ring_active;
        int head;
        int tail;
        struct plx_dma_hw_std_desc *hw_ring;
        dma_addr_t hw_ring_dma;
        struct plx_dma_desc **desc_ring;
};

static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
        return container_of(c, struct plx_dma_dev, dma_chan);
}

static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct plx_dma_desc, txd);
}

static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
{
        return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
}

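/*
 * Walk the ring from tail to head, completing every descriptor whose valid
 * bit the hardware has already cleared and reporting the write-back status
 * through the dmaengine result callback.
 */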
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
        struct dmaengine_result res;
        struct plx_dma_desc *desc;
        u32 flags;

        spin_lock_bh(&plxdev->ring_lock);

        while (plxdev->tail != plxdev->head) {
                desc = plx_dma_get_desc(plxdev, plxdev->tail);

                flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

                if (flags & PLX_DESC_FLAG_VALID)
                        break;

                res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

                if (flags & PLX_DESC_WB_SUCCESS)
                        res.result = DMA_TRANS_NOERROR;
                else if (flags & PLX_DESC_WB_WR_FAIL)
                        res.result = DMA_TRANS_WRITE_FAILED;
                else
                        res.result = DMA_TRANS_READ_FAILED;

                dma_cookie_complete(&desc->txd);
                dma_descriptor_unmap(&desc->txd);
                dmaengine_desc_get_callback_invoke(&desc->txd, &res);
                desc->txd.callback = NULL;
                desc->txd.callback_result = NULL;

                plxdev->tail++;
        }

        spin_unlock_bh(&plxdev->ring_lock);
}

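/*
 * Complete whatever the hardware has already finished, then fail all
 * remaining descriptors with DMA_TRANS_ABORTED.  Called once the engine has
 * been stopped or the device has gone away.
 */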
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
        struct dmaengine_result res;
        struct plx_dma_desc *desc;

        plx_dma_process_desc(plxdev);

        spin_lock_bh(&plxdev->ring_lock);

        while (plxdev->tail != plxdev->head) {
                desc = plx_dma_get_desc(plxdev, plxdev->tail);

                res.residue = desc->orig_size;
                res.result = DMA_TRANS_ABORTED;

                dma_cookie_complete(&desc->txd);
                dma_descriptor_unmap(&desc->txd);
                dmaengine_desc_get_callback_invoke(&desc->txd, &res);
                desc->txd.callback = NULL;
                desc->txd.callback_result = NULL;

                plxdev->tail++;
        }

        spin_unlock_bh(&plxdev->ring_lock);
}

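/*
 * Request a graceful pause and wait up to one second for the hardware to
 * acknowledge it, then clear the descriptor ring registers.  The caller must
 * guarantee that the PCI device is still present.
 */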
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        u32 val;

        val = readl(plxdev->bar + PLX_REG_CTRL);
        if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
                return;

        writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
               plxdev->bar + PLX_REG_CTRL);

        while (!time_after(jiffies, timeout)) {
                val = readl(plxdev->bar + PLX_REG_CTRL);
                if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
                        break;

                cpu_relax();
        }

        if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
                dev_err(plxdev->dma_dev.dev,
                        "Timeout waiting for graceful pause!\n");

        writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
               plxdev->bar + PLX_REG_CTRL);

        writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
        writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
        writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
        writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}

static void plx_dma_stop(struct plx_dma_dev *plxdev)
{
        rcu_read_lock();
        if (!rcu_dereference(plxdev->pdev)) {
                rcu_read_unlock();
                return;
        }

        __plx_dma_stop(plxdev);

        rcu_read_unlock();
}

static void plx_dma_desc_task(struct tasklet_struct *t)
{
        struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

        plx_dma_process_desc(plxdev);
}

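/*
 * Build a single memcpy descriptor at the ring head.  On success the
 * ring_lock is intentionally left held; it is released in plx_dma_tx_submit()
 * after the valid bit has been set.
 */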
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
                dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
                unsigned long flags)
        __acquires(plxdev->ring_lock)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
        struct plx_dma_desc *plxdesc;

        spin_lock_bh(&plxdev->ring_lock);
        if (!plxdev->ring_active)
                goto err_unlock;

        if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
                goto err_unlock;

        if (len > PLX_DESC_SIZE_MASK)
                goto err_unlock;

        plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
        plxdev->head++;

        plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
        plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
        plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
        plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

        plxdesc->orig_size = len;

        if (flags & DMA_PREP_INTERRUPT)
                len |= PLX_DESC_FLAG_INT_WHEN_DONE;

        plxdesc->hw->flags_and_size = cpu_to_le32(len);
        plxdesc->txd.flags = flags;

        /* return with the lock held, it will be released in tx_submit */

        return &plxdesc->txd;

err_unlock:
        /*
         * Keep sparse happy by restoring an even lock count on
         * this lock.
         */
        __acquire(plxdev->ring_lock);

        spin_unlock_bh(&plxdev->ring_lock);
        return NULL;
}

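/*
 * Assign the cookie and hand the descriptor to the hardware by setting its
 * valid bit, then drop the ring_lock taken in plx_dma_prep_memcpy().
 */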
static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
        __releases(plxdev->ring_lock)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
        struct plx_dma_desc *plxdesc = to_plx_desc(desc);
        dma_cookie_t cookie;

        cookie = dma_cookie_assign(desc);

        /*
         * Ensure the descriptor updates are visible to the dma device
         * before setting the valid bit.
         */
        wmb();

        plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

        spin_unlock_bh(&plxdev->ring_lock);

        return cookie;
}

static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        plx_dma_process_desc(plxdev);

        return dma_cookie_status(chan, cookie, txstate);
}

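/*
 * Kick the engine so it fetches any descriptors marked valid since the last
 * start.  Skipped if the PCI device has already been removed.
 */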
static void plx_dma_issue_pending(struct dma_chan *chan)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

        rcu_read_lock();
        if (!rcu_dereference(plxdev->pdev)) {
                rcu_read_unlock();
                return;
        }

        /*
         * Ensure the valid bits are visible before starting the
         * DMA engine.
         */
        wmb();

        writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

        rcu_read_unlock();
}

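/*
 * Interrupt handler: defer completion processing to the tasklet and
 * acknowledge the status bits by writing them back.
 */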
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
        struct plx_dma_dev *plxdev = devid;
        u32 status;

        status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

        if (!status)
                return IRQ_NONE;

        if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
                tasklet_schedule(&plxdev->desc_task);

        writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

        return IRQ_HANDLED;
}

static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
        struct plx_dma_desc *desc;
        int i;

        plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
                                    sizeof(*plxdev->desc_ring), GFP_KERNEL);
        if (!plxdev->desc_ring)
                return -ENOMEM;

        for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc)
                        goto free_and_exit;

                dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
                desc->txd.tx_submit = plx_dma_tx_submit;
                desc->hw = &plxdev->hw_ring[i];

                plxdev->desc_ring[i] = desc;
        }

        return 0;

free_and_exit:
        for (i = 0; i < PLX_DMA_RING_COUNT; i++)
                kfree(plxdev->desc_ring[i]);
        kfree(plxdev->desc_ring);
        return -ENOMEM;
}

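/*
 * Allocate the coherent hardware ring and the shadow software ring, then
 * program the ring address, size and prefetch limit so the channel is ready
 * to accept descriptors.  Returns the number of descriptors on success.
 */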
static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
        size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
        int rc;

        plxdev->head = plxdev->tail = 0;
        plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
                                             &plxdev->hw_ring_dma, GFP_KERNEL);
        if (!plxdev->hw_ring)
                return -ENOMEM;

        rc = plx_dma_alloc_desc(plxdev);
        if (rc)
                goto out_free_hw_ring;

        rcu_read_lock();
        if (!rcu_dereference(plxdev->pdev)) {
                rcu_read_unlock();
                rc = -ENODEV;
                goto out_free_hw_ring;
        }

        writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
        writel(lower_32_bits(plxdev->hw_ring_dma),
               plxdev->bar + PLX_REG_DESC_RING_ADDR);
        writel(upper_32_bits(plxdev->hw_ring_dma),
               plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
        writel(lower_32_bits(plxdev->hw_ring_dma),
               plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
        writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
        writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);

        plxdev->ring_active = true;

        rcu_read_unlock();

        return PLX_DMA_RING_COUNT;

out_free_hw_ring:
        dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
                          plxdev->hw_ring_dma);
        return rc;
}

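/*
 * Tear down the channel: mark the ring inactive, pause the hardware, wait
 * out any in-flight interrupt and tasklet, abort what is left in the ring
 * and free both rings.
 */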
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
        size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
        struct pci_dev *pdev;
        int irq = -1;
        int i;

        spin_lock_bh(&plxdev->ring_lock);
        plxdev->ring_active = false;
        spin_unlock_bh(&plxdev->ring_lock);

        plx_dma_stop(plxdev);

        rcu_read_lock();
        pdev = rcu_dereference(plxdev->pdev);
        if (pdev)
                irq = pci_irq_vector(pdev, 0);
        rcu_read_unlock();

        if (irq > 0)
                synchronize_irq(irq);

        tasklet_kill(&plxdev->desc_task);

        plx_dma_abort_desc(plxdev);

        for (i = 0; i < PLX_DMA_RING_COUNT; i++)
                kfree(plxdev->desc_ring[i]);

        kfree(plxdev->desc_ring);
        dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
                          plxdev->hw_ring_dma);
}

static void plx_dma_release(struct dma_device *dma_dev)
{
        struct plx_dma_dev *plxdev =
                container_of(dma_dev, struct plx_dma_dev, dma_dev);

        put_device(dma_dev->dev);
        kfree(plxdev);
}

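/*
 * Set up the dmaengine device with its single memcpy-capable channel and
 * register it.  The interrupt is requested first so the handler is in place
 * before the channel can be used.
 */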
static int plx_dma_create(struct pci_dev *pdev)
{
        struct plx_dma_dev *plxdev;
        struct dma_device *dma;
        struct dma_chan *chan;
        int rc;

        plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
        if (!plxdev)
                return -ENOMEM;

        rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
                         KBUILD_MODNAME, plxdev);
        if (rc)
                goto free_plx;

        spin_lock_init(&plxdev->ring_lock);
        tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

        RCU_INIT_POINTER(plxdev->pdev, pdev);
        plxdev->bar = pcim_iomap_table(pdev)[0];

        dma = &plxdev->dma_dev;
        dma->chancnt = 1;
        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
        dma->dev = get_device(&pdev->dev);

        dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
        dma->device_free_chan_resources = plx_dma_free_chan_resources;
        dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
        dma->device_issue_pending = plx_dma_issue_pending;
        dma->device_tx_status = plx_dma_tx_status;
        dma->device_release = plx_dma_release;

        chan = &plxdev->dma_chan;
        chan->device = dma;
        dma_cookie_init(chan);
        list_add_tail(&chan->device_node, &dma->channels);

        rc = dma_async_device_register(dma);
        if (rc) {
                pci_err(pdev, "Failed to register dma device: %d\n", rc);
                goto put_device;
        }

        pci_set_drvdata(pdev, plxdev);

        return 0;

put_device:
        put_device(&pdev->dev);
        free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
        kfree(plxdev);

        return rc;
}

static int plx_dma_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (rc)
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (rc)
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
        if (rc)
                return rc;

        rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (rc <= 0)
                return rc;

        pci_set_master(pdev);

        rc = plx_dma_create(pdev);
        if (rc)
                goto err_free_irq_vectors;

        pci_info(pdev, "PLX DMA Channel Registered\n");

        return 0;

err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
        return rc;
}

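/*
 * Device removal: free the interrupt, clear the RCU pdev pointer so
 * concurrent callers stop touching the hardware, stop the engine, abort
 * outstanding descriptors and unregister the dmaengine device.  The
 * plx_dma_dev itself is freed from plx_dma_release() once the last
 * reference is dropped.
 */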
static void plx_dma_remove(struct pci_dev *pdev)
{
        struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

        free_irq(pci_irq_vector(pdev, 0), plxdev);

        rcu_assign_pointer(plxdev->pdev, NULL);
        synchronize_rcu();

        spin_lock_bh(&plxdev->ring_lock);
        plxdev->ring_active = false;
        spin_unlock_bh(&plxdev->ring_lock);

        __plx_dma_stop(plxdev);
        plx_dma_abort_desc(plxdev);

        plxdev->bar = NULL;
        dma_async_device_unregister(&plxdev->dma_dev);

        pci_free_irq_vectors(pdev);
}

static const struct pci_device_id plx_dma_pci_tbl[] = {
        {
                .vendor         = PCI_VENDOR_ID_PLX,
                .device         = 0x87D0,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
                .class          = PCI_CLASS_SYSTEM_OTHER << 8,
                .class_mask     = 0xFFFFFFFF,
        },
        {0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);

static struct pci_driver plx_dma_pci_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = plx_dma_pci_tbl,
        .probe          = plx_dma_probe,
        .remove         = plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);