linux/drivers/firewire/nosy.c
/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"

#define TCODE_PHY_PACKET                0x10
#define PCI_DEVICE_ID_TI_PCILYNX        0x8000

static char driver_name[] = KBUILD_MODNAME;

/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
        __le32 next;
        __le32 async_error_next;
        u32 user_data;
        __le32 pcl_status;
        __le32 remaining_transfer_count;
        __le32 next_data_buffer;
        struct {
                __le32 control;
                __le32 pointer;
        } buffer[13];
};

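/*
 * Snooped packets are stored back to back in a client's circular buffer:
 * each record is a struct packet, i.e. a length header immediately
 * followed by the payload bytes.  The interrupt handler is the producer
 * (packet_buffer_put) and the reading process the consumer
 * (packet_buffer_get); "size" is the only state shared between the two
 * and is updated atomically.
 */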
struct packet {
        unsigned int length;
        char data[0];
};

struct packet_buffer {
        char *data;
        size_t capacity;
        long total_packet_count, lost_packet_count;
        atomic_t size;
        struct packet *head, *tail;
        wait_queue_head_t wait;
};

struct pcilynx {
        struct pci_dev *pci_device;
        __iomem char *registers;

        struct pcl *rcv_start_pcl, *rcv_pcl;
        __le32 *rcv_buffer;

        dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

        spinlock_t client_list_lock;
        struct list_head client_list;

        struct miscdevice misc;
        struct list_head link;
        struct kref kref;
};

static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
        kref_get(&lynx->kref);

        return lynx;
}

static void
lynx_release(struct kref *kref)
{
        kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
        kref_put(&lynx->kref, lynx_release);
}

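/*
 * One client per open file descriptor of the nosy misc device.  A client
 * starts receiving packets only after NOSY_IOC_START links it into the
 * card's client_list; tcode_mask selects which transaction codes it wants.
 */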
struct client {
        struct pcilynx *lynx;
        u32 tcode_mask;
        struct packet_buffer buffer;
        struct list_head link;
};

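/* All probed cards; card_mutex serializes probe/remove against open(). */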
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
        buffer->data = kmalloc(capacity, GFP_KERNEL);
        if (buffer->data == NULL)
                return -ENOMEM;
        buffer->head = (struct packet *) buffer->data;
        buffer->tail = (struct packet *) buffer->data;
        buffer->capacity = capacity;
        buffer->lost_packet_count = 0;
        atomic_set(&buffer->size, 0);
        init_waitqueue_head(&buffer->wait);

        return 0;
}

static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
        kfree(buffer->data);
}

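/*
 * Copy the packet at the head of the buffer to user space, handling
 * wrap-around at the end of the buffer.  buffer->size is decremented
 * only after the copy, so the interrupt-context producer cannot
 * overwrite the packet while it is being read.
 */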
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
        struct packet_buffer *buffer = &client->buffer;
        size_t length;
        char *end;

        if (wait_event_interruptible(buffer->wait,
                                     atomic_read(&buffer->size) > 0) ||
                                     list_empty(&client->lynx->link))
                return -ERESTARTSYS;

        if (atomic_read(&buffer->size) == 0)
                return -ENODEV;

        /* FIXME: Check length <= user_length. */

        end = buffer->data + buffer->capacity;
        length = buffer->head->length;

        if (&buffer->head->data[length] < end) {
                if (copy_to_user(data, buffer->head->data, length))
                        return -EFAULT;
                buffer->head = (struct packet *) &buffer->head->data[length];
        } else {
                size_t split = end - buffer->head->data;

                if (copy_to_user(data, buffer->head->data, split))
                        return -EFAULT;
                if (copy_to_user(data + split, buffer->data, length - split))
                        return -EFAULT;
                buffer->head = (struct packet *) &buffer->data[length - split];
        }

        /*
         * Decrease buffer->size as the last thing, since this is what
         * keeps the interrupt from overwriting the packet we are
         * retrieving from the buffer.
         */
        atomic_sub(sizeof(struct packet) + length, &buffer->size);

        return length;
}

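/*
 * Append a packet to the buffer.  Called from interrupt context with the
 * card's client_list_lock held.  If the record would not fit, the packet
 * is dropped and only counted as lost; otherwise it is copied in (with
 * wrap-around) and any sleeping reader is woken up.
 */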
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
        char *end;

        buffer->total_packet_count++;

        if (buffer->capacity <
            atomic_read(&buffer->size) + sizeof(struct packet) + length) {
                buffer->lost_packet_count++;
                return;
        }

        end = buffer->data + buffer->capacity;
        buffer->tail->length = length;

        if (&buffer->tail->data[length] < end) {
                memcpy(buffer->tail->data, data, length);
                buffer->tail = (struct packet *) &buffer->tail->data[length];
        } else {
                size_t split = end - buffer->tail->data;

                memcpy(buffer->tail->data, data, split);
                memcpy(buffer->data, data + split, length - split);
                buffer->tail = (struct packet *) &buffer->data[length - split];
        }

        /* Finally, adjust buffer size and wake up userspace reader. */

        atomic_add(sizeof(struct packet) + length, &buffer->size);
        wake_up_interruptible(&buffer->wait);
}

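/* MMIO accessors for the PCILynx register window mapped in add_card(). */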
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
        writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
        return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
        reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
                           int dmachan)
{
        reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
        reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
                  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}

static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
        if (addr > 15) {
                dev_err(&lynx->pci_device->dev,
                        "PHY register address %d out of range\n", addr);
                return -1;
        }
        if (val > 0xff) {
                dev_err(&lynx->pci_device->dev,
                        "PHY register value %d out of range\n", val);
                return -1;
        }
        reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
                  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

        return 0;
}

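/*
 * Look up the card by misc device minor under card_mutex, take a
 * reference on it and allocate a 128 KiB per-client packet buffer.
 * The client is not added to the card's client_list here; that
 * happens in NOSY_IOC_START.
 */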
static int
nosy_open(struct inode *inode, struct file *file)
{
        int minor = iminor(inode);
        struct client *client;
        struct pcilynx *tmp, *lynx = NULL;

        mutex_lock(&card_mutex);
        list_for_each_entry(tmp, &card_list, link)
                if (tmp->misc.minor == minor) {
                        lynx = lynx_get(tmp);
                        break;
                }
        mutex_unlock(&card_mutex);
        if (lynx == NULL)
                return -ENODEV;

        client = kmalloc(sizeof *client, GFP_KERNEL);
        if (client == NULL)
                goto fail;

        client->tcode_mask = ~0;
        client->lynx = lynx;
        INIT_LIST_HEAD(&client->link);

        if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
                goto fail;

        file->private_data = client;

        return nonseekable_open(inode, file);
fail:
        kfree(client);
        lynx_put(lynx);

        return -ENOMEM;
}

static int
nosy_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct pcilynx *lynx = client->lynx;

        spin_lock_irq(&lynx->client_list_lock);
        list_del_init(&client->link);
        spin_unlock_irq(&lynx->client_list_lock);

        packet_buffer_destroy(&client->buffer);
        kfree(client);
        lynx_put(lynx);

        return 0;
}

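/*
 * Report POLLIN/POLLRDNORM when packets are buffered, and POLLHUP once
 * the card has been removed (remove_card() empties lynx->link).
 */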
static unsigned int
nosy_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int ret = 0;

        poll_wait(file, &client->buffer.wait, pt);

        if (atomic_read(&client->buffer.size) > 0)
                ret = POLLIN | POLLRDNORM;

        if (list_empty(&client->lynx->link))
                ret |= POLLHUP;

        return ret;
}

static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return packet_buffer_get(client, buffer, count);
}

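/*
 * GET_STATS reports total and lost packet counts, START/STOP add and
 * remove the client from the card's client_list, and FILTER sets the
 * client's tcode_mask.  The client_list_lock also protects the buffer
 * counters and tcode_mask against the interrupt handler.
 */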
static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;
        spinlock_t *client_list_lock = &client->lynx->client_list_lock;
        struct nosy_stats stats;

        switch (cmd) {
        case NOSY_IOC_GET_STATS:
                spin_lock_irq(client_list_lock);
                stats.total_packet_count = client->buffer.total_packet_count;
                stats.lost_packet_count  = client->buffer.lost_packet_count;
                spin_unlock_irq(client_list_lock);

                if (copy_to_user((void __user *) arg, &stats, sizeof stats))
                        return -EFAULT;
                else
                        return 0;

        case NOSY_IOC_START:
                spin_lock_irq(client_list_lock);
                list_add_tail(&client->link, &client->lynx->client_list);
                spin_unlock_irq(client_list_lock);

                return 0;

        case NOSY_IOC_STOP:
                spin_lock_irq(client_list_lock);
                list_del_init(&client->link);
                spin_unlock_irq(client_list_lock);

                return 0;

        case NOSY_IOC_FILTER:
                spin_lock_irq(client_list_lock);
                client->tcode_mask = arg;
                spin_unlock_irq(client_list_lock);

                return 0;

        default:
                return -EINVAL;
                /* Flush buffer, configure filter. */
        }
}

static const struct file_operations nosy_ops = {
        .owner =                THIS_MODULE,
        .read =                 nosy_read,
        .unlocked_ioctl =       nosy_ioctl,
        .poll =                 nosy_poll,
        .open =                 nosy_open,
        .release =              nosy_release,
};

#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */

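/*
 * Called from the interrupt handler when DMA channel 0 has halted after
 * receiving a packet.  Reads the packet length and tcode from the
 * receive PCL and buffer, stamps the first quadlet with the microseconds
 * part of the current time and hands the packet to every client whose
 * tcode_mask matches.
 */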
static void
packet_irq_handler(struct pcilynx *lynx)
{
        struct client *client;
        u32 tcode_mask, tcode, timestamp;
        size_t length;
        struct timespec64 ts64;

        /* FIXME: Also report rcv_speed. */

        length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
        tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

        ktime_get_real_ts64(&ts64);
        timestamp = ts64.tv_nsec / NSEC_PER_USEC;
        lynx->rcv_buffer[0] = (__force __le32)timestamp;

        if (length == PHY_PACKET_SIZE)
                tcode_mask = 1 << TCODE_PHY_PACKET;
        else
                tcode_mask = 1 << tcode;

        spin_lock(&lynx->client_list_lock);

        list_for_each_entry(client, &lynx->client_list, link)
                if (client->tcode_mask & tcode_mask)
                        packet_buffer_put(&client->buffer,
                                          lynx->rcv_buffer, length + 4);

        spin_unlock(&lynx->client_list_lock);
}

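/*
 * On a PHY bus reset interrupt, push a four-byte record containing only
 * a microsecond timestamp to every client.
 */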
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
        struct client *client;
        struct timespec64 ts64;
        u32    timestamp;

        ktime_get_real_ts64(&ts64);
        timestamp = ts64.tv_nsec / NSEC_PER_USEC;

        spin_lock(&lynx->client_list_lock);

        list_for_each_entry(client, &lynx->client_list, link)
                packet_buffer_put(&client->buffer, &timestamp, 4);

        spin_unlock(&lynx->client_list_lock);
}

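/*
 * Shared interrupt handler.  A status read of all ones means the card
 * has been ejected; a clear PCI_INT_INT_PEND means the interrupt is not
 * ours.  Link interrupts (bus reset) are acknowledged before the PCI
 * interrupt status is cleared, and a halted DMA channel 0 is re-armed
 * after the received packet has been dispatched.
 */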
static irqreturn_t
irq_handler(int irq, void *device)
{
        struct pcilynx *lynx = device;
        u32 pci_int_status;

        pci_int_status = reg_read(lynx, PCI_INT_STATUS);

        if (pci_int_status == ~0)
                /* Card was ejected. */
                return IRQ_NONE;

        if ((pci_int_status & PCI_INT_INT_PEND) == 0)
                /* Not our interrupt, bail out quickly. */
                return IRQ_NONE;

        if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
                u32 link_int_status;

                link_int_status = reg_read(lynx, LINK_INT_STATUS);
                reg_write(lynx, LINK_INT_STATUS, link_int_status);

                if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
                        bus_reset_irq_handler(lynx);
        }

        /* Clear the PCI_INT_STATUS register only after clearing the
         * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
         * be set again immediately. */

        reg_write(lynx, PCI_INT_STATUS, pci_int_status);

        if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
                packet_irq_handler(lynx);
                run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
        }

        return IRQ_HANDLED;
}

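/*
 * Tear-down mirrors add_card(): unregister the misc device and unlink
 * the card under card_mutex, disable and free the interrupt, wake up
 * any clients sleeping on their packet buffers, release the DMA memory
 * and register mapping, and drop the probe reference.
 */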
static void
remove_card(struct pci_dev *dev)
{
        struct pcilynx *lynx = pci_get_drvdata(dev);
        struct client *client;

        mutex_lock(&card_mutex);
        list_del_init(&lynx->link);
        misc_deregister(&lynx->misc);
        mutex_unlock(&card_mutex);

        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);

        spin_lock_irq(&lynx->client_list_lock);
        list_for_each_entry(client, &lynx->client_list, link)
                wake_up_interruptible(&client->buffer.wait);
        spin_unlock_irq(&lynx->client_list_lock);

        pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                            lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
        pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                            lynx->rcv_pcl, lynx->rcv_pcl_bus);
        pci_free_consistent(lynx->pci_device, PAGE_SIZE,
                            lynx->rcv_buffer, lynx->rcv_buffer_bus);

        iounmap(lynx->registers);
        pci_disable_device(dev);
        lynx_put(lynx);
}

#define RCV_BUFFER_SIZE (16 * 1024)

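/*
 * Probe: enable the device with a 32-bit DMA mask, map its registers,
 * allocate two PCLs plus a 16 KiB receive buffer split into 2 KiB
 * scatter entries (the first shortened by the quadlet reserved for the
 * timestamp), soft-reset the chip, enable link interrupts, put the link
 * into snoop mode and register the "nosy" misc device.
 */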
static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
        struct pcilynx *lynx;
        u32 p, end;
        int ret, i;

        if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
                dev_err(&dev->dev,
                    "DMA address limits not supported for PCILynx hardware\n");
                return -ENXIO;
        }
        if (pci_enable_device(dev)) {
                dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
                return -ENXIO;
        }
        pci_set_master(dev);

        lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
        if (lynx == NULL) {
                dev_err(&dev->dev, "Failed to allocate control structure\n");
                ret = -ENOMEM;
                goto fail_disable;
        }
        lynx->pci_device = dev;
        pci_set_drvdata(dev, lynx);

        spin_lock_init(&lynx->client_list_lock);
        INIT_LIST_HEAD(&lynx->client_list);
        kref_init(&lynx->kref);

        lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
                                          PCILYNX_MAX_REGISTER);
        if (lynx->registers == NULL) {
                dev_err(&dev->dev, "Failed to map registers\n");
                ret = -ENOMEM;
                goto fail_deallocate_lynx;
        }

        lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
                                sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
        lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
                                sizeof(struct pcl), &lynx->rcv_pcl_bus);
        lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
                                RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
        if (lynx->rcv_start_pcl == NULL ||
            lynx->rcv_pcl == NULL ||
            lynx->rcv_buffer == NULL) {
                dev_err(&dev->dev, "Failed to allocate receive buffer\n");
                ret = -ENOMEM;
                goto fail_deallocate_buffers;
        }
        lynx->rcv_start_pcl->next       = cpu_to_le32(lynx->rcv_pcl_bus);
        lynx->rcv_pcl->next             = cpu_to_le32(PCL_NEXT_INVALID);
        lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);

        lynx->rcv_pcl->buffer[0].control =
                        cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
        lynx->rcv_pcl->buffer[0].pointer =
                        cpu_to_le32(lynx->rcv_buffer_bus + 4);
        p = lynx->rcv_buffer_bus + 2048;
        end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
        for (i = 1; p < end; i++, p += 2048) {
                lynx->rcv_pcl->buffer[i].control =
                        cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
                lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
        }
        lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);

        reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
        /* Fix buggy cards with autoboot pin not tied low: */
        reg_write(lynx, DMA0_CHAN_CTRL, 0);
        reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
        /* now, looking for PHY register set */
        if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
                lynx->phyic.reg_1394a = 1;
                PRINT(KERN_INFO, lynx->id,
                      "found 1394a conform PHY (using extended register set)");
                lynx->phyic.vendor = get_phy_vendorid(lynx);
                lynx->phyic.product = get_phy_productid(lynx);
        } else {
                lynx->phyic.reg_1394a = 0;
                PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
        }
#endif

        /* Setup the general receive FIFO max size. */
        reg_write(lynx, FIFO_SIZES, 255);

        reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

        reg_write(lynx, LINK_INT_ENABLE,
                  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
                  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
                  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
                  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
                  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

        /* Disable the L flag in self ID packets. */
        set_phy_reg(lynx, 4, 0);

        /* Put this baby into snoop mode */
        reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

        run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);

        if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
                        driver_name, lynx)) {
                dev_err(&dev->dev,
                        "Failed to allocate shared interrupt %d\n", dev->irq);
                ret = -EIO;
                goto fail_deallocate_buffers;
        }

        lynx->misc.parent = &dev->dev;
        lynx->misc.minor = MISC_DYNAMIC_MINOR;
        lynx->misc.name = "nosy";
        lynx->misc.fops = &nosy_ops;

        mutex_lock(&card_mutex);
        ret = misc_register(&lynx->misc);
        if (ret) {
                dev_err(&dev->dev, "Failed to register misc char device\n");
                mutex_unlock(&card_mutex);
                goto fail_free_irq;
        }
        list_add_tail(&lynx->link, &card_list);
        mutex_unlock(&card_mutex);

        dev_info(&dev->dev,
                 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

        return 0;

fail_free_irq:
        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
        if (lynx->rcv_start_pcl)
                pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                                lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
        if (lynx->rcv_pcl)
                pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                                lynx->rcv_pcl, lynx->rcv_pcl_bus);
        if (lynx->rcv_buffer)
                pci_free_consistent(lynx->pci_device, PAGE_SIZE,
                                lynx->rcv_buffer, lynx->rcv_buffer_bus);
        iounmap(lynx->registers);

fail_deallocate_lynx:
        kfree(lynx);

fail_disable:
        pci_disable_device(dev);

        return ret;
}

static struct pci_device_id pci_table[] = {
        {
                .vendor =    PCI_VENDOR_ID_TI,
                .device =    PCI_DEVICE_ID_TI_PCILYNX,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        { }     /* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver lynx_pci_driver = {
        .name =         driver_name,
        .id_table =     pci_table,
        .probe =        add_card,
        .remove =       remove_card,
};

module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");