linux/drivers/firewire/nosy.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"

#define TCODE_PHY_PACKET                0x10
#define PCI_DEVICE_ID_TI_PCILYNX        0x8000
#define RCV_BUFFER_SIZE                 (16 * 1024)

static char driver_name[] = KBUILD_MODNAME;

/*
 * This is the physical layout of a PCL (packet control list);
 * its size is 128 bytes.
 */
struct pcl {
        __le32 next;
        __le32 async_error_next;
        u32 user_data;
        __le32 pcl_status;
        __le32 remaining_transfer_count;
        __le32 next_data_buffer;
        struct {
                __le32 control;
                __le32 pointer;
        } buffer[13];
};

struct packet {
        unsigned int length;
        char data[];
};

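/*
 * Ring buffer of variable-length packets.  packet_buffer_put() produces
 * at the tail from interrupt context, packet_buffer_get() consumes at
 * the head from process context; "size" is the number of bytes currently
 * queued and is the only field shared between the two sides.
 */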
struct packet_buffer {
        char *data;
        size_t capacity;
        long total_packet_count, lost_packet_count;
        atomic_t size;
        struct packet *head, *tail;
        wait_queue_head_t wait;
};

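/*
 * Per-card state: the mapped register window, the DMA-coherent receive
 * program (PCLs) and receive buffer, and the list of clients currently
 * snooping.  The card is reference counted so that it can outlive a
 * surprise removal while clients still hold it open.
 */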
struct pcilynx {
        struct pci_dev *pci_device;
        __iomem char *registers;

        struct pcl *rcv_start_pcl, *rcv_pcl;
        __le32 *rcv_buffer;

        dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

        spinlock_t client_list_lock;
        struct list_head client_list;

        struct miscdevice misc;
        struct list_head link;
        struct kref kref;
};

static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
        kref_get(&lynx->kref);

        return lynx;
}

static void
lynx_release(struct kref *kref)
{
        kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
        kref_put(&lynx->kref, lynx_release);
}

struct client {
        struct pcilynx *lynx;
        u32 tcode_mask;
        struct packet_buffer buffer;
        struct list_head link;
};

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
        buffer->data = kmalloc(capacity, GFP_KERNEL);
        if (buffer->data == NULL)
                return -ENOMEM;
        buffer->head = (struct packet *) buffer->data;
        buffer->tail = (struct packet *) buffer->data;
        buffer->capacity = capacity;
        buffer->total_packet_count = 0;
        buffer->lost_packet_count = 0;
        atomic_set(&buffer->size, 0);
        init_waitqueue_head(&buffer->wait);

        return 0;
}

static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
        kfree(buffer->data);
}

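/*
 * Copy the packet at the head of the ring buffer to userspace and return
 * its length.  Blocks until a packet is available or the card is removed,
 * and handles the wrap-around at the end of the buffer.
 */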
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
        struct packet_buffer *buffer = &client->buffer;
        size_t length;
        char *end;

        if (wait_event_interruptible(buffer->wait,
                                     atomic_read(&buffer->size) > 0 ||
                                     list_empty(&client->lynx->link)))
                return -ERESTARTSYS;

        if (atomic_read(&buffer->size) == 0)
                return -ENODEV;

        /* FIXME: Check length <= user_length. */

        end = buffer->data + buffer->capacity;
        length = buffer->head->length;

        if (&buffer->head->data[length] < end) {
                if (copy_to_user(data, buffer->head->data, length))
                        return -EFAULT;
                buffer->head = (struct packet *) &buffer->head->data[length];
        } else {
                size_t split = end - buffer->head->data;

                if (copy_to_user(data, buffer->head->data, split))
                        return -EFAULT;
                if (copy_to_user(data + split, buffer->data, length - split))
                        return -EFAULT;
                buffer->head = (struct packet *) &buffer->data[length - split];
        }

        /*
         * Decrease buffer->size as the last thing, since this is what
         * keeps the interrupt from overwriting the packet we are
         * retrieving from the buffer.
         */
        atomic_sub(sizeof(struct packet) + length, &buffer->size);

        return length;
}

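/*
 * Append one packet to the tail of the ring buffer; called from the
 * interrupt handler with lynx->client_list_lock held.  If the packet
 * does not fit, it is counted as lost and dropped.
 */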
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
        char *end;

        buffer->total_packet_count++;

        if (buffer->capacity <
            atomic_read(&buffer->size) + sizeof(struct packet) + length) {
                buffer->lost_packet_count++;
                return;
        }

        end = buffer->data + buffer->capacity;
        buffer->tail->length = length;

        if (&buffer->tail->data[length] < end) {
                memcpy(buffer->tail->data, data, length);
                buffer->tail = (struct packet *) &buffer->tail->data[length];
        } else {
                size_t split = end - buffer->tail->data;

                memcpy(buffer->tail->data, data, split);
                memcpy(buffer->data, data + split, length - split);
                buffer->tail = (struct packet *) &buffer->data[length - split];
        }

        /* Finally, adjust buffer size and wake up userspace reader. */

        atomic_add(sizeof(struct packet) + length, &buffer->size);
        wake_up_interruptible(&buffer->wait);
}

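/* MMIO accessors for the PCILynx register window mapped in add_card(). */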
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
        writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
        return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
        reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, int dmachan)
{
        reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
        reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
                  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}

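/*
 * Write a PHY register through the LINK_PHY register.  Only the base
 * register set (addresses 0-15) is reachable this way.
 */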
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
        if (addr > 15) {
                dev_err(&lynx->pci_device->dev,
                        "PHY register address %d out of range\n", addr);
                return -1;
        }
        if (val > 0xff) {
                dev_err(&lynx->pci_device->dev,
                        "PHY register value %d out of range\n", val);
                return -1;
        }
        reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
                  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

        return 0;
}

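/*
 * Look up the card that owns this misc minor and take a reference on it.
 * A new client starts out detached; it is only added to the card's
 * client list by NOSY_IOC_START.
 */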
static int
nosy_open(struct inode *inode, struct file *file)
{
        int minor = iminor(inode);
        struct client *client;
        struct pcilynx *tmp, *lynx = NULL;

        mutex_lock(&card_mutex);
        list_for_each_entry(tmp, &card_list, link)
                if (tmp->misc.minor == minor) {
                        lynx = lynx_get(tmp);
                        break;
                }
        mutex_unlock(&card_mutex);
        if (lynx == NULL)
                return -ENODEV;

        client = kmalloc(sizeof *client, GFP_KERNEL);
        if (client == NULL)
                goto fail;

        client->tcode_mask = ~0;
        client->lynx = lynx;
        INIT_LIST_HEAD(&client->link);

        if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
                goto fail;

        file->private_data = client;

        return stream_open(inode, file);
fail:
        kfree(client);
        lynx_put(lynx);

        return -ENOMEM;
}

static int
nosy_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct pcilynx *lynx = client->lynx;

        spin_lock_irq(&lynx->client_list_lock);
        list_del_init(&client->link);
        spin_unlock_irq(&lynx->client_list_lock);

        packet_buffer_destroy(&client->buffer);
        kfree(client);
        lynx_put(lynx);

        return 0;
}

static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        __poll_t ret = 0;

        poll_wait(file, &client->buffer.wait, pt);

        if (atomic_read(&client->buffer.size) > 0)
                ret = EPOLLIN | EPOLLRDNORM;

        if (list_empty(&client->lynx->link))
                ret |= EPOLLHUP;

        return ret;
}

static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return packet_buffer_get(client, buffer, count);
}

static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;
        spinlock_t *client_list_lock = &client->lynx->client_list_lock;
        struct nosy_stats stats;
        int ret;

        switch (cmd) {
        case NOSY_IOC_GET_STATS:
                spin_lock_irq(client_list_lock);
                stats.total_packet_count = client->buffer.total_packet_count;
                stats.lost_packet_count  = client->buffer.lost_packet_count;
                spin_unlock_irq(client_list_lock);

                if (copy_to_user((void __user *) arg, &stats, sizeof stats))
                        return -EFAULT;
                else
                        return 0;

        case NOSY_IOC_START:
                ret = -EBUSY;
                spin_lock_irq(client_list_lock);
                if (list_empty(&client->link)) {
                        list_add_tail(&client->link, &client->lynx->client_list);
                        ret = 0;
                }
                spin_unlock_irq(client_list_lock);

                return ret;

        case NOSY_IOC_STOP:
                spin_lock_irq(client_list_lock);
                list_del_init(&client->link);
                spin_unlock_irq(client_list_lock);

                return 0;

        case NOSY_IOC_FILTER:
                spin_lock_irq(client_list_lock);
                client->tcode_mask = arg;
                spin_unlock_irq(client_list_lock);

                return 0;

        default:
                return -EINVAL;
                /* Flush buffer, configure filter. */
        }
}

static const struct file_operations nosy_ops = {
        .owner =                THIS_MODULE,
        .read =                 nosy_read,
        .unlocked_ioctl =       nosy_ioctl,
        .poll =                 nosy_poll,
        .open =                 nosy_open,
        .release =              nosy_release,
};

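/*
 * Rough userspace usage (illustrative, not defined in this file): open
 * the "nosy" misc device, optionally set a transaction-code filter with
 * NOSY_IOC_FILTER, attach with NOSY_IOC_START, and then each read()
 * returns one snooped packet prefixed by a 32-bit timestamp (microseconds
 * within the current second).  NOSY_IOC_STOP detaches the client and
 * NOSY_IOC_GET_STATS reports total and lost packet counts.
 */
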
#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */

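/*
 * DMA channel 0 has halted after receiving a packet.  The payload starts
 * at quadlet 1 of rcv_buffer, so quadlet 0 is reused to carry a microsecond
 * timestamp; the packet is then handed to every client whose tcode filter
 * matches.
 */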
static void
packet_irq_handler(struct pcilynx *lynx)
{
        struct client *client;
        u32 tcode_mask, tcode, timestamp;
        size_t length;
        struct timespec64 ts64;

        /* FIXME: Also report rcv_speed. */

        length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
        tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

        ktime_get_real_ts64(&ts64);
        timestamp = ts64.tv_nsec / NSEC_PER_USEC;
        lynx->rcv_buffer[0] = (__force __le32)timestamp;

        if (length == PHY_PACKET_SIZE)
                tcode_mask = 1 << TCODE_PHY_PACKET;
        else
                tcode_mask = 1 << tcode;

        spin_lock(&lynx->client_list_lock);

        list_for_each_entry(client, &lynx->client_list, link)
                if (client->tcode_mask & tcode_mask)
                        packet_buffer_put(&client->buffer,
                                          lynx->rcv_buffer, length + 4);

        spin_unlock(&lynx->client_list_lock);
}

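/*
 * A bus reset carries no packet data; push a bare 4-byte timestamp to
 * every client so readers can tell when the reset happened.
 */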
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
        struct client *client;
        struct timespec64 ts64;
        u32    timestamp;

        ktime_get_real_ts64(&ts64);
        timestamp = ts64.tv_nsec / NSEC_PER_USEC;

        spin_lock(&lynx->client_list_lock);

        list_for_each_entry(client, &lynx->client_list, link)
                packet_buffer_put(&client->buffer, &timestamp, 4);

        spin_unlock(&lynx->client_list_lock);
}

static irqreturn_t
irq_handler(int irq, void *device)
{
        struct pcilynx *lynx = device;
        u32 pci_int_status;

        pci_int_status = reg_read(lynx, PCI_INT_STATUS);

        if (pci_int_status == ~0)
                /* Card was ejected. */
                return IRQ_NONE;

        if ((pci_int_status & PCI_INT_INT_PEND) == 0)
                /* Not our interrupt, bail out quickly. */
                return IRQ_NONE;

        if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
                u32 link_int_status;

                link_int_status = reg_read(lynx, LINK_INT_STATUS);
                reg_write(lynx, LINK_INT_STATUS, link_int_status);

                if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
                        bus_reset_irq_handler(lynx);
        }

        /*
         * Clear the PCI_INT_STATUS register only after clearing the
         * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
         * be set again immediately.
         */

        reg_write(lynx, PCI_INT_STATUS, pci_int_status);

        if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
                packet_irq_handler(lynx);
                run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
        }

        return IRQ_HANDLED;
}

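/*
 * Tear down in roughly the reverse order of add_card(): unpublish the misc
 * device so no new clients can attach, silence and free the interrupt, wake
 * any blocked readers (they will see -ENODEV), and release the DMA memory.
 * The pcilynx structure itself is freed once the last reference is dropped.
 */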
static void
remove_card(struct pci_dev *dev)
{
        struct pcilynx *lynx = pci_get_drvdata(dev);
        struct client *client;

        mutex_lock(&card_mutex);
        list_del_init(&lynx->link);
        misc_deregister(&lynx->misc);
        mutex_unlock(&card_mutex);

        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);

        spin_lock_irq(&lynx->client_list_lock);
        list_for_each_entry(client, &lynx->client_list, link)
                wake_up_interruptible(&client->buffer.wait);
        spin_unlock_irq(&lynx->client_list_lock);

        dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
                          lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
        dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
                          lynx->rcv_pcl, lynx->rcv_pcl_bus);
        dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE,
                          lynx->rcv_buffer, lynx->rcv_buffer_bus);

        iounmap(lynx->registers);
        pci_disable_device(dev);
        lynx_put(lynx);
}

static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
        struct pcilynx *lynx;
        u32 p, end;
        int ret, i;

        if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
                dev_err(&dev->dev,
                    "DMA address limits not supported for PCILynx hardware\n");
                return -ENXIO;
        }
        if (pci_enable_device(dev)) {
                dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
                return -ENXIO;
        }
        pci_set_master(dev);

        lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
        if (lynx == NULL) {
                dev_err(&dev->dev, "Failed to allocate control structure\n");
                ret = -ENOMEM;
                goto fail_disable;
        }
        lynx->pci_device = dev;
        pci_set_drvdata(dev, lynx);

        spin_lock_init(&lynx->client_list_lock);
        INIT_LIST_HEAD(&lynx->client_list);
        kref_init(&lynx->kref);

        lynx->registers = ioremap(pci_resource_start(dev, 0),
                                  PCILYNX_MAX_REGISTER);
        if (lynx->registers == NULL) {
                dev_err(&dev->dev, "Failed to map registers\n");
                ret = -ENOMEM;
                goto fail_deallocate_lynx;
        }

        lynx->rcv_start_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
                                                 sizeof(struct pcl),
                                                 &lynx->rcv_start_pcl_bus,
                                                 GFP_KERNEL);
        lynx->rcv_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
                                           sizeof(struct pcl),
                                           &lynx->rcv_pcl_bus, GFP_KERNEL);
        lynx->rcv_buffer = dma_alloc_coherent(&lynx->pci_device->dev,
                                              RCV_BUFFER_SIZE,
                                              &lynx->rcv_buffer_bus, GFP_KERNEL);
        if (lynx->rcv_start_pcl == NULL ||
            lynx->rcv_pcl == NULL ||
            lynx->rcv_buffer == NULL) {
                dev_err(&dev->dev, "Failed to allocate receive buffer\n");
                ret = -ENOMEM;
                goto fail_deallocate_buffers;
        }
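        /*
         * Build the receive DMA program: a dummy start PCL links to the
         * real one, which scatters each received packet across 2 KiB
         * chunks of the 16 KiB receive buffer.  The first chunk is 2044
         * bytes and starts at offset 4 so that quadlet 0 stays free for
         * the timestamp written by packet_irq_handler().
         */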
        lynx->rcv_start_pcl->next       = cpu_to_le32(lynx->rcv_pcl_bus);
        lynx->rcv_pcl->next             = cpu_to_le32(PCL_NEXT_INVALID);
        lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);

        lynx->rcv_pcl->buffer[0].control =
                        cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
        lynx->rcv_pcl->buffer[0].pointer =
                        cpu_to_le32(lynx->rcv_buffer_bus + 4);
        p = lynx->rcv_buffer_bus + 2048;
        end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
        for (i = 1; p < end; i++, p += 2048) {
                lynx->rcv_pcl->buffer[i].control =
                        cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
                lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
        }
        lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);

        reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
        /* Fix buggy cards with autoboot pin not tied low: */
        reg_write(lynx, DMA0_CHAN_CTRL, 0);
        reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
        /* now, looking for PHY register set */
        if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
                lynx->phyic.reg_1394a = 1;
                PRINT(KERN_INFO, lynx->id,
                      "found 1394a conform PHY (using extended register set)");
                lynx->phyic.vendor = get_phy_vendorid(lynx);
                lynx->phyic.product = get_phy_productid(lynx);
        } else {
                lynx->phyic.reg_1394a = 0;
                PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
        }
#endif

        /* Setup the general receive FIFO max size. */
        reg_write(lynx, FIFO_SIZES, 255);

        reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

        reg_write(lynx, LINK_INT_ENABLE,
                  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
                  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
                  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
                  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
                  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

        /* Disable the L flag in self ID packets. */
        set_phy_reg(lynx, 4, 0);

        /* Put this baby into snoop mode */
        reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

        run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);

        if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
                        driver_name, lynx)) {
                dev_err(&dev->dev,
                        "Failed to allocate shared interrupt %d\n", dev->irq);
                ret = -EIO;
                goto fail_deallocate_buffers;
        }

        lynx->misc.parent = &dev->dev;
        lynx->misc.minor = MISC_DYNAMIC_MINOR;
        lynx->misc.name = "nosy";
        lynx->misc.fops = &nosy_ops;

        mutex_lock(&card_mutex);
        ret = misc_register(&lynx->misc);
        if (ret) {
                dev_err(&dev->dev, "Failed to register misc char device\n");
                mutex_unlock(&card_mutex);
                goto fail_free_irq;
        }
        list_add_tail(&lynx->link, &card_list);
        mutex_unlock(&card_mutex);

        dev_info(&dev->dev,
                 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

        return 0;

fail_free_irq:
        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
        if (lynx->rcv_start_pcl)
                dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
                                  lynx->rcv_start_pcl,
                                  lynx->rcv_start_pcl_bus);
        if (lynx->rcv_pcl)
                dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
                                  lynx->rcv_pcl, lynx->rcv_pcl_bus);
        if (lynx->rcv_buffer)
                dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE,
                                  lynx->rcv_buffer, lynx->rcv_buffer_bus);
        iounmap(lynx->registers);

fail_deallocate_lynx:
        kfree(lynx);

fail_disable:
        pci_disable_device(dev);

        return ret;
}

static const struct pci_device_id pci_table[] = {
        {
                .vendor =    PCI_VENDOR_ID_TI,
                .device =    PCI_DEVICE_ID_TI_PCILYNX,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        { }     /* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver lynx_pci_driver = {
        .name =         driver_name,
        .id_table =     pci_table,
        .probe =        add_card,
        .remove =       remove_card,
};

module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI PCILynx 1394 controllers");
MODULE_LICENSE("GPL");