// SPDX-License-Identifier: GPL-2.0-only
/*
 * wanXL serial card driver for Linux
 * host part
 *
 * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Status:
 *   - Only DTE (external clock) support with NRZ and NRZI encodings
 *   - wanXL100 will require minor driver modifications, no access to hw
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/io.h>

#include "wanxl.h"

static const char *version = "wanXL serial card driver version: 0.48";

#define PLX_CTL_RESET   0x40000000 /* adapter reset */

#undef DEBUG_PKT
#undef DEBUG_PCI

/* MAILBOX #1 - PUTS COMMANDS */
#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
#ifdef __LITTLE_ENDIAN
#define MBX1_CMD_BSWAP  0x8C000001 /* little-endian Byte Swap Mode */
#else
#define MBX1_CMD_BSWAP  0x8C000000 /* big-endian Byte Swap Mode */
#endif

/* MAILBOX #2 - DRAM SIZE */
#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */

struct port {
        struct net_device *dev;
        struct card *card;
        spinlock_t lock;        /* for wanxl_xmit */
        int node;               /* physical port #0 - 3 */
        unsigned int clock_type;
        int tx_in, tx_out;
        struct sk_buff *tx_skbs[TX_BUFFERS];
};

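/* Host/firmware shared state: one card_status lives in coherent DMA memory
 * (allocated in wanxl_pci_init_one()) and holds the common RX descriptor ring
 * plus the per-port status blocks that both sides update in place.
 */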
struct card_status {
        desc_t rx_descs[RX_QUEUE_LENGTH];
        port_status_t port_status[4];
};

struct card {
        int n_ports;            /* 1, 2 or 4 ports */
        u8 irq;

        u8 __iomem *plx;        /* PLX PCI9060 virtual base address */
        struct pci_dev *pdev;   /* for pci_name(pdev) */
        int rx_in;
        struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
        struct card_status *status;     /* shared between host and card */
        dma_addr_t status_address;
        struct port ports[];    /* 1 - 4 port structures follow */
};

static inline struct port *dev_to_port(struct net_device *dev)
{
        return (struct port *)dev_to_hdlc(dev)->priv;
}

static inline port_status_t *get_status(struct port *port)
{
        return &port->card->status->port_status[port->node];
}

#ifdef DEBUG_PCI
/* Debug wrapper: the streaming mappings below are made with dma_map_single(),
 * so intercept that call (not the long-gone pci_map_single()) and complain if
 * a mapping ends up above 4 GB.
 */
static inline dma_addr_t wanxl_dma_map_single_debug(struct device *dev,
                                                    void *ptr, size_t size,
                                                    enum dma_data_direction dir)
{
        dma_addr_t addr = dma_map_single(dev, ptr, size, dir);

        if (addr + size > 0x100000000LL)
                pr_crit("%s: dma_map_single() returned memory at 0x%llx!\n",
                        dev_name(dev), (unsigned long long)addr);
        return addr;
}

#undef dma_map_single
#define dma_map_single wanxl_dma_map_single_debug
#endif

/* Cable and/or personality module change interrupt service */
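/* The cable status word packs the cable type into its low three bits, the
 * personality module type into the three bits at STATUS_CABLE_PM_SHIFT, and
 * DSR, DCD and DCE indications as individual flag bits.
 */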
static inline void wanxl_cable_intr(struct port *port)
{
        u32 value = get_status(port)->cable;
        int valid = 1;
        const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";

        switch (value & 0x7) {
        case STATUS_CABLE_V35:
                cable = "V.35";
                break;
        case STATUS_CABLE_X21:
                cable = "X.21";
                break;
        case STATUS_CABLE_V24:
                cable = "V.24";
                break;
        case STATUS_CABLE_EIA530:
                cable = "EIA530";
                break;
        case STATUS_CABLE_NONE:
                cable = "no";
                break;
        default:
                cable = "invalid";
        }

        switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
        case STATUS_CABLE_V35:
                pm = "V.35";
                break;
        case STATUS_CABLE_X21:
                pm = "X.21";
                break;
        case STATUS_CABLE_V24:
                pm = "V.24";
                break;
        case STATUS_CABLE_EIA530:
                pm = "EIA530";
                break;
        case STATUS_CABLE_NONE:
                pm = "no personality";
                valid = 0;
                break;
        default:
                pm = "invalid personality";
                valid = 0;
        }

        if (valid) {
                if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
                        dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
                                ", DSR off";
                        dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
                                ", carrier off";
                }
                dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
        }
        netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
                    pm, dte, cable, dsr, dcd);

        if (value & STATUS_CABLE_DCD)
                netif_carrier_on(port->dev);
        else
                netif_carrier_off(port->dev);
}

/* Transmit complete interrupt service */
static inline void wanxl_tx_intr(struct port *port)
{
        struct net_device *dev = port->dev;

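        /* Reclaim TX descriptors starting at tx_in: anything the card has
         * already sent (or reported an underrun on) is unmapped and freed;
         * stop at the first slot that is still pending or already empty.
         */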
        while (1) {
                desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
                struct sk_buff *skb = port->tx_skbs[port->tx_in];

                switch (desc->stat) {
                case PACKET_FULL:
                case PACKET_EMPTY:
                        netif_wake_queue(dev);
                        return;

                case PACKET_UNDERRUN:
                        dev->stats.tx_errors++;
                        dev->stats.tx_fifo_errors++;
                        break;

                default:
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                }
                desc->stat = PACKET_EMPTY; /* Free descriptor */
                dma_unmap_single(&port->card->pdev->dev, desc->address,
                                 skb->len, DMA_TO_DEVICE);
                dev_consume_skb_irq(skb);
                port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
        }
}

/* Receive complete interrupt service */
static inline void wanxl_rx_intr(struct card *card)
{
        desc_t *desc;

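        /* All ports share this one RX ring; the low bits of desc->stat
         * (PACKET_PORT_MASK) say which port a completed buffer belongs to.
         * Each consumed slot is re-armed with a freshly mapped skb.
         */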
        while (desc = &card->status->rx_descs[card->rx_in],
               desc->stat != PACKET_EMPTY) {
                /* valid ports are numbered 0 .. n_ports - 1 */
                if ((desc->stat & PACKET_PORT_MASK) >= card->n_ports) {
                        pr_crit("%s: received packet for nonexistent port\n",
                                pci_name(card->pdev));
                } else {
                        struct sk_buff *skb = card->rx_skbs[card->rx_in];
                        struct port *port = &card->ports[desc->stat &
                                                    PACKET_PORT_MASK];
                        struct net_device *dev = port->dev;

                        if (!skb) {
                                dev->stats.rx_dropped++;
                        } else {
                                dma_unmap_single(&card->pdev->dev,
                                                 desc->address, BUFFER_LENGTH,
                                                 DMA_FROM_DEVICE);
                                skb_put(skb, desc->length);

#ifdef DEBUG_PKT
                                printk(KERN_DEBUG "%s RX(%i):", dev->name,
                                       skb->len);
                                debug_frame(skb);
#endif
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += skb->len;
                                skb->protocol = hdlc_type_trans(skb, dev);
                                netif_rx(skb);
                                skb = NULL;
                        }

                        if (!skb) {
                                skb = dev_alloc_skb(BUFFER_LENGTH);
                                desc->address = skb ?
                                        dma_map_single(&card->pdev->dev,
                                                       skb->data,
                                                       BUFFER_LENGTH,
                                                       DMA_FROM_DEVICE) : 0;
                                card->rx_skbs[card->rx_in] = skb;
                        }
                }
                desc->stat = PACKET_EMPTY; /* Free descriptor */
                card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
        }
}

static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
        struct card *card = dev_id;
        int i;
        u32 stat;
        int handled = 0;

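        /* Each set doorbell bit is an event posted by the card; writing the
         * bits back acknowledges them. TX completion and cable changes have
         * one bit per port, RX completion is a single bit for all ports.
         */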
        while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
                handled = 1;
                writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);

                for (i = 0; i < card->n_ports; i++) {
                        if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
                                wanxl_tx_intr(&card->ports[i]);
                        if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
                                wanxl_cable_intr(&card->ports[i]);
                }
                if (stat & (1 << DOORBELL_FROM_CARD_RX))
                        wanxl_rx_intr(card);
        }

        return IRQ_RETVAL(handled);
}

static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct port *port = dev_to_port(dev);
        desc_t *desc;

        spin_lock(&port->lock);

        desc = &get_status(port)->tx_descs[port->tx_out];
        if (desc->stat != PACKET_EMPTY) {
                /* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
                netif_stop_queue(dev);
                spin_unlock(&port->lock);
                return NETDEV_TX_BUSY;       /* request packet to be queued */
        }

#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif

        port->tx_skbs[port->tx_out] = skb;
        desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
                                       skb->len, DMA_TO_DEVICE);
        desc->length = skb->len;
        desc->stat = PACKET_FULL;
        writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
               port->card->plx + PLX_DOORBELL_TO_CARD);

        port->tx_out = (port->tx_out + 1) % TX_BUFFERS;

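        /* If the next descriptor is still in use the ring is full: stop the
         * queue now and let wanxl_tx_intr() restart it once a slot frees up.
         */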
        if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
                netif_stop_queue(dev);
#ifdef DEBUG_PKT
                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
        }

        spin_unlock(&port->lock);
        return NETDEV_TX_OK;
}

static int wanxl_attach(struct net_device *dev, unsigned short encoding,
                        unsigned short parity)
{
        struct port *port = dev_to_port(dev);

        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC32_PR1_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT &&
            parity != PARITY_CRC32_PR0_CCITT &&
            parity != PARITY_CRC16_PR0_CCITT)
                return -EINVAL;

        get_status(port)->encoding = encoding;
        get_status(port)->parity = parity;
        return 0;
}

static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs)
{
        const size_t size = sizeof(sync_serial_settings);
        sync_serial_settings line;
        struct port *port = dev_to_port(dev);

        switch (ifs->type) {
        case IF_GET_IFACE:
                ifs->type = IF_IFACE_SYNC_SERIAL;
                if (ifs->size < size) {
                        ifs->size = size; /* data size wanted */
                        return -ENOBUFS;
                }
                memset(&line, 0, sizeof(line));
                line.clock_type = get_status(port)->clocking;
                line.clock_rate = 0;
                line.loopback = 0;

                if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
                        return -EFAULT;
                return 0;

        case IF_IFACE_SYNC_SERIAL:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (dev->flags & IFF_UP)
                        return -EBUSY;

                if (copy_from_user(&line, ifs->ifs_ifsu.sync,
                                   size))
                        return -EFAULT;

                if (line.clock_type != CLOCK_EXT &&
                    line.clock_type != CLOCK_TXFROMRX)
                        return -EINVAL; /* No such clock setting */

                if (line.loopback != 0)
                        return -EINVAL;

                get_status(port)->clocking = line.clock_type;
                return 0;

        default:
                return hdlc_ioctl(dev, ifs);
        }
}

static int wanxl_open(struct net_device *dev)
{
        struct port *port = dev_to_port(dev);
        u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
        unsigned long timeout;
        int i;

        if (get_status(port)->open) {
                netdev_err(dev, "port already open\n");
                return -EIO;
        }

        i = hdlc_open(dev);
        if (i)
                return i;

        port->tx_in = port->tx_out = 0;
        for (i = 0; i < TX_BUFFERS; i++)
                get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
        /* signal the card */
        writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);

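        /* The firmware confirms the open by setting the port's open flag in
         * the shared status structure; poll for up to a second.
         */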
        timeout = jiffies + HZ;
        do {
                if (get_status(port)->open) {
                        netif_start_queue(dev);
                        return 0;
                }
        } while (time_after(timeout, jiffies));

        netdev_err(dev, "unable to open port\n");
        /* ask the card to close the port, should it still be alive */
        writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
        return -EFAULT;
}

static int wanxl_close(struct net_device *dev)
{
        struct port *port = dev_to_port(dev);
        unsigned long timeout;
        int i;

        hdlc_close(dev);
        /* signal the card */
        writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
               port->card->plx + PLX_DOORBELL_TO_CARD);

        timeout = jiffies + HZ;
        do {
                if (!get_status(port)->open)
                        break;
        } while (time_after(timeout, jiffies));

        if (get_status(port)->open)
                netdev_err(dev, "unable to close port\n");

        netif_stop_queue(dev);

        for (i = 0; i < TX_BUFFERS; i++) {
                desc_t *desc = &get_status(port)->tx_descs[i];

                if (desc->stat != PACKET_EMPTY) {
                        desc->stat = PACKET_EMPTY;
                        dma_unmap_single(&port->card->pdev->dev,
                                         desc->address, port->tx_skbs[i]->len,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb(port->tx_skbs[i]);
                }
        }
        return 0;
}

static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
{
        struct port *port = dev_to_port(dev);

        dev->stats.rx_over_errors = get_status(port)->rx_overruns;
        dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
        dev->stats.rx_errors = dev->stats.rx_over_errors +
                dev->stats.rx_frame_errors;
        return &dev->stats;
}

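/* Hand a PUTS command to the card via PLX mailbox 1 and wait up to five
 * seconds for it to be acknowledged (the mailbox reads back as zero).
 */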
static int wanxl_puts_command(struct card *card, u32 cmd)
{
        unsigned long timeout = jiffies + 5 * HZ;

        writel(cmd, card->plx + PLX_MAILBOX_1);
        do {
                if (readl(card->plx + PLX_MAILBOX_1) == 0)
                        return 0;

                schedule();
        } while (time_after(timeout, jiffies));

        return -1;
}

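/* Pulse the PLX soft-reset bit to restart the card. Mailbox 0 is preset to
 * 0x80 ("PUTS still testing") beforehand, so the PUTS completion poll in
 * wanxl_pci_init_one() does not mistake a stale zero for a finished self-test.
 */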
static void wanxl_reset(struct card *card)
{
        u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

        writel(0x80, card->plx + PLX_MAILBOX_0);
        writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
        readl(card->plx + PLX_CONTROL); /* wait for posted write */
        udelay(1);
        writel(old_value, card->plx + PLX_CONTROL);
        readl(card->plx + PLX_CONTROL); /* wait for posted write */
}

static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
        struct card *card = pci_get_drvdata(pdev);
        int i;

        for (i = 0; i < card->n_ports; i++) {
                unregister_hdlc_device(card->ports[i].dev);
                free_netdev(card->ports[i].dev);
        }

        /* unregister and free all host resources */
        if (card->irq)
                free_irq(card->irq, card);

        wanxl_reset(card);

        for (i = 0; i < RX_QUEUE_LENGTH; i++)
                if (card->rx_skbs[i]) {
                        dma_unmap_single(&card->pdev->dev,
                                         card->status->rx_descs[i].address,
                                         BUFFER_LENGTH, DMA_FROM_DEVICE);
                        dev_kfree_skb(card->rx_skbs[i]);
                }

        if (card->plx)
                iounmap(card->plx);

        if (card->status)
                dma_free_coherent(&pdev->dev, sizeof(struct card_status),
                                  card->status, card->status_address);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        kfree(card);
}

#include "wanxlfw.inc"

static const struct net_device_ops wanxl_ops = {
        .ndo_open       = wanxl_open,
        .ndo_stop       = wanxl_close,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_siocwandev = wanxl_ioctl,
        .ndo_get_stats  = wanxl_get_stats,
};

static int wanxl_pci_init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        struct card *card;
        u32 ramsize, stat;
        unsigned long timeout;
        u32 plx_phy;            /* PLX PCI base address */
        u32 mem_phy;            /* memory PCI base addr */
        u8 __iomem *mem;        /* memory virtual base addr */
        int i, ports;

#ifndef MODULE
        pr_info_once("%s\n", version);
#endif

        i = pci_enable_device(pdev);
        if (i)
                return i;

        /* QUICC can only access first 256 MB of host RAM directly,
         * but PLX9060 DMA does 32-bits for actual packet data transfers
         */

        /* FIXME when PCI/DMA subsystems are fixed.
         * We set both the streaming and coherent DMA masks to 28 bits
         * and pray dma_alloc_coherent() will use this info. It should
         * work on most platforms.
         */
        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
            dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
                pr_err("No usable DMA configuration\n");
                pci_disable_device(pdev);
                return -EIO;
        }

        i = pci_request_regions(pdev, "wanXL");
        if (i) {
                pci_disable_device(pdev);
                return i;
        }

        switch (pdev->device) {
        case PCI_DEVICE_ID_SBE_WANXL100:
                ports = 1;
                break;
        case PCI_DEVICE_ID_SBE_WANXL200:
                ports = 2;
                break;
        default:
                ports = 4;
        }

        card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
        if (!card) {
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                return -ENOBUFS;
        }

        pci_set_drvdata(pdev, card);
        card->pdev = pdev;

        card->status = dma_alloc_coherent(&pdev->dev,
                                          sizeof(struct card_status),
                                          &card->status_address, GFP_KERNEL);
        if (!card->status) {
                wanxl_pci_remove_one(pdev);
                return -ENOBUFS;
        }

#ifdef DEBUG_PCI
        printk(KERN_DEBUG "wanXL %s: dma_alloc_coherent() returned memory at 0x%llx\n",
               pci_name(pdev), (unsigned long long)card->status_address);
#endif

        /* FIXME when PCI/DMA subsystems are fixed.
         * We set both the streaming and coherent DMA masks back to 32 bits
         * to indicate the card can do 32-bit DMA addressing.
         */
        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
            dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                pr_err("No usable DMA configuration\n");
                wanxl_pci_remove_one(pdev);
                return -EIO;
        }
        /* set up PLX mapping */
        plx_phy = pci_resource_start(pdev, 0);

        card->plx = ioremap(plx_phy, 0x70);
        if (!card->plx) {
                pr_err("ioremap() failed\n");
                wanxl_pci_remove_one(pdev);
                return -EFAULT;
        }

#if RESET_WHILE_LOADING
        wanxl_reset(card);
#endif

        timeout = jiffies + 20 * HZ;
        while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
                if (time_before(timeout, jiffies)) {
                        pr_warn("%s: timeout waiting for PUTS to complete\n",
                                pci_name(pdev));
                        wanxl_pci_remove_one(pdev);
                        return -ENODEV;
                }

                switch (stat & 0xC0) {
                case 0x00:      /* hmm - PUTS completed with non-zero code? */
                case 0x80:      /* PUTS still testing the hardware */
                        break;

                default:
                        pr_warn("%s: PUTS test 0x%X failed\n",
                                pci_name(pdev), stat & 0x30);
                        wanxl_pci_remove_one(pdev);
                        return -ENODEV;
                }

                schedule();
        }

        /* get on-board memory size (PUTS detects no more than 4 MB) */
        ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;

        /* set up on-board RAM mapping */
        mem_phy = pci_resource_start(pdev, 2);

        /* sanity check the board's reported memory size */
        if (ramsize < BUFFERS_ADDR +
            (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
                pr_warn("%s: not enough on-board RAM (%u bytes detected, %u bytes required)\n",
                        pci_name(pdev), ramsize,
                        BUFFERS_ADDR +
                        (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

        if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
                pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

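        /* Pre-fill the RX ring with mapped receive buffers; a slot whose
         * allocation fails is left empty and any packet completed in it is
         * counted as rx_dropped by wanxl_rx_intr().
         */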
        for (i = 0; i < RX_QUEUE_LENGTH; i++) {
                struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);

                card->rx_skbs[i] = skb;
                if (skb)
                        card->status->rx_descs[i].address =
                                dma_map_single(&card->pdev->dev, skb->data,
                                               BUFFER_LENGTH, DMA_FROM_DEVICE);
        }

        mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
        if (!mem) {
                pr_err("ioremap() failed\n");
                wanxl_pci_remove_one(pdev);
                return -EFAULT;
        }

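        /* Copy the firmware image into on-board RAM at PDM_OFFSET one 32-bit
         * word at a time, hand the firmware the bus addresses of the per-port
         * status areas and of the whole shared status structure, and record
         * the firmware's load offset at the very start of on-board RAM.
         */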
        for (i = 0; i < sizeof(firmware); i += 4)
                writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);

        for (i = 0; i < ports; i++)
                writel(card->status_address +
                       (void *)&card->status->port_status[i] -
                       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
        writel(card->status_address, mem + PDM_OFFSET + 20);
        writel(PDM_OFFSET, mem);
        iounmap(mem);

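        /* Start the downloaded firmware: clear mailbox 5, ask PUTS to Abort
         * and Jump, then give the firmware up to five seconds to post a
         * non-zero value back in mailbox 5 (used as the detected RAM size
         * when DETECT_RAM is set).
         */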
        writel(0, card->plx + PLX_MAILBOX_5);

        if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
                pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

        timeout = jiffies + 5 * HZ;
        do {
                stat = readl(card->plx + PLX_MAILBOX_5);
                if (stat)
                        break;
                schedule();
        } while (time_after(timeout, jiffies));

        if (!stat) {
                pr_warn("%s: timeout while initializing card firmware\n",
                        pci_name(pdev));
                wanxl_pci_remove_one(pdev);
                return -ENODEV;
        }

#if DETECT_RAM
        ramsize = stat;
#endif

        pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
                pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);

        /* Allocate IRQ */
        if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
                pr_warn("%s: could not allocate IRQ%i\n",
                        pci_name(pdev), pdev->irq);
                wanxl_pci_remove_one(pdev);
                return -EBUSY;
        }
        card->irq = pdev->irq;

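        /* For each physical port: allocate an HDLC net_device, wire up the
         * wanXL netdev ops and HDLC attach/xmit callbacks, default to
         * external clocking, and register it. n_ports counts only the ports
         * registered successfully so cleanup stays in step.
         */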
        for (i = 0; i < ports; i++) {
                hdlc_device *hdlc;
                struct port *port = &card->ports[i];
                struct net_device *dev = alloc_hdlcdev(port);

                if (!dev) {
                        pr_err("%s: unable to allocate memory\n",
                               pci_name(pdev));
                        wanxl_pci_remove_one(pdev);
                        return -ENOMEM;
                }

                port->dev = dev;
                hdlc = dev_to_hdlc(dev);
                spin_lock_init(&port->lock);
                dev->tx_queue_len = 50;
                dev->netdev_ops = &wanxl_ops;
                hdlc->attach = wanxl_attach;
                hdlc->xmit = wanxl_xmit;
                port->card = card;
                port->node = i;
                get_status(port)->clocking = CLOCK_EXT;
                if (register_hdlc_device(dev)) {
                        pr_err("%s: unable to register hdlc device\n",
                               pci_name(pdev));
                        free_netdev(dev);
                        wanxl_pci_remove_one(pdev);
                        return -ENOBUFS;
                }
                card->n_ports++;
        }

        pr_info("%s: port", pci_name(pdev));
        for (i = 0; i < ports; i++)
                pr_cont("%s #%i: %s",
                        i ? "," : "", i, card->ports[i].dev->name);
        pr_cont("\n");

        for (i = 0; i < ports; i++)
                wanxl_cable_intr(&card->ports[i]); /* get carrier status etc. */

        return 0;
}

static const struct pci_device_id wanxl_pci_tbl[] = {
        { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
          PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
          PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
          PCI_ANY_ID, 0, 0, 0 },
        { 0, }
};

static struct pci_driver wanxl_pci_driver = {
        .name           = "wanXL",
        .id_table       = wanxl_pci_tbl,
        .probe          = wanxl_pci_init_one,
        .remove         = wanxl_pci_remove_one,
};

static int __init wanxl_init_module(void)
{
#ifdef MODULE
        pr_info("%s\n", version);
#endif
        return pci_register_driver(&wanxl_pci_driver);
}

static void __exit wanxl_cleanup_module(void)
{
        pci_unregister_driver(&wanxl_pci_driver);
}

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);

module_init(wanxl_init_module);
module_exit(wanxl_cleanup_module);