/* linux/drivers/net/wan/lmc/lmc_main.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
   4  * All rights reserved.  www.lanmedia.com
   5  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
   6  *
   7  * This code is written by:
   8  * Andrew Stanley-Jones (asj@cban.com)
   9  * Rob Braun (bbraun@vix.com),
  10  * Michael Graff (explorer@vix.com) and
  11  * Matt Thomas (matt@3am-software.com).
  12  *
  13  * With Help By:
  14  * David Boggs
  15  * Ron Crane
  16  * Alan Cox
  17  *
  18  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
  19  *
  20  * To control link specific options lmcctl is required.
  21  * It can be obtained from ftp.lanmedia.com.
  22  *
  23  * Linux driver notes:
  24  * Linux uses the device struct lmc_private to pass private information
  25  * around.
  26  *
  27  * The initialization portion of this driver (the lmc_reset() and the
  28  * lmc_dec_reset() functions, as well as the led controls and the
  29  * lmc_initcsrs() functions.
  30  *
  31  * The watchdog function runs every second and checks to see if
  32  * we still have link, and that the timing source is what we expected
  33  * it to be.  If link is lost, the interface is marked down, and
  34  * we no longer can transmit.
  35  */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/string.h>
  40#include <linux/timer.h>
  41#include <linux/ptrace.h>
  42#include <linux/errno.h>
  43#include <linux/ioport.h>
  44#include <linux/slab.h>
  45#include <linux/interrupt.h>
  46#include <linux/pci.h>
  47#include <linux/delay.h>
  48#include <linux/hdlc.h>
  49#include <linux/in.h>
  50#include <linux/if_arp.h>
  51#include <linux/netdevice.h>
  52#include <linux/etherdevice.h>
  53#include <linux/skbuff.h>
  54#include <linux/inet.h>
  55#include <linux/bitops.h>
  56#include <asm/processor.h>             /* Processor type for cache alignment. */
  57#include <asm/io.h>
  58#include <asm/dma.h>
  59#include <linux/uaccess.h>
  60//#include <asm/spinlock.h>
  61
  62#define DRIVER_MAJOR_VERSION     1
  63#define DRIVER_MINOR_VERSION    34
  64#define DRIVER_SUB_VERSION       0
  65
  66#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
  67
  68#include "lmc.h"
  69#include "lmc_var.h"
  70#include "lmc_ioctl.h"
  71#include "lmc_debug.h"
  72#include "lmc_proto.h"
  73
/* Packet buffer size in bytes; reported to userspace as MaxFrameSize via
 * the LMCIOCGETXINFO ioctl.  (1542 presumably = 1500-byte payload plus
 * framing overhead — confirm against the hardware docs.) */
static int LMC_PKT_BUF_SZ = 1542;
  75
/* PCI IDs we bind to: DEC Tulip FAST devices carrying the LMC ID in either
 * the subsystem-vendor or the subsystem-device slot (both orderings are
 * matched; probe code notes some systems reverse the two fields). */
static const struct pci_device_id lmc_pci_tbl[] = {
        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
          PCI_VENDOR_ID_LMC, PCI_ANY_ID },
        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
          PCI_ANY_ID, PCI_VENDOR_ID_LMC },
        { 0 }
};
  83
  84MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
  85MODULE_LICENSE("GPL v2");
  86
  87
  88static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
  89                                        struct net_device *dev);
  90static int lmc_rx (struct net_device *dev);
  91static int lmc_open(struct net_device *dev);
  92static int lmc_close(struct net_device *dev);
  93static struct net_device_stats *lmc_get_stats(struct net_device *dev);
  94static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
  95static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
  96static void lmc_softreset(lmc_softc_t * const);
  97static void lmc_running_reset(struct net_device *dev);
  98static int lmc_ifdown(struct net_device * const);
  99static void lmc_watchdog(struct timer_list *t);
 100static void lmc_reset(lmc_softc_t * const sc);
 101static void lmc_dec_reset(lmc_softc_t * const sc);
 102static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
 103
/*
 * linux reserves 16 device specific IOCTLs.  We call them
 * LMCIOC* to control various bits of our world.
 *
 * Dispatches the LMC-private ioctls: get/set driver state, switch the
 * protocol type, read/clear statistics, reset the card, and program the
 * Xilinx FPGA.  Returns 0 on success, a negative errno on failure, or
 * -EOPNOTSUPP for commands this driver does not recognize.  Commands that
 * modify hardware or driver state require CAP_NET_ADMIN and are serialized
 * against the rest of the driver by sc->lmc_lock.
 */
static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                              void __user *data, int cmd) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    lmc_ctl_t ctl;
    int ret = -EOPNOTSUPP;
    u16 regVal;
    unsigned long flags;

    /*
     * Most functions mess with the structure
     * Disable interrupts while we do the polling
     */

    switch (cmd) {
        /*
         * Return current driver state.  Since we keep this up
         * to date internally, just copy this out to the user.
         */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
                ret = -EFAULT;
        else
                ret = 0;
        break;

    case LMCIOCSINFO: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        /* Settings may only be changed while the interface is down. */
        if(dev->flags & IFF_UP){
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
                ret = -EFAULT;
                break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status (sc, &ctl);

        /* A CRC length change also toggles the per-descriptor
         * "add CRC disable" bit so hardware framing matches. */
        if(ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |=  LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

    case LMCIOCIFTYPE: /*fold01*/
        {
            u16 old_type = sc->if_type;
            u16 new_type;

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            if (copy_from_user(&new_type, data, sizeof(u16))) {
                ret = -EFAULT;
                break;
            }


            if (new_type == old_type)
            {
                ret = 0 ;
                break;                          /* no change */
            }

            /* Close the old protocol, then attach and open the new one. */
            spin_lock_irqsave(&sc->lmc_lock, flags);
            lmc_proto_close(sc);

            sc->if_type = new_type;
            lmc_proto_attach(sc);
            ret = lmc_proto_open(sc);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
            break;
        }

    case LMCIOCGETXINFO: /*fold01*/
        /* Fill in extended card/driver info; Magic0/Magic1 bracket the
         * struct so userspace can sanity-check the copy. */
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg (sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
                ret = -EFAULT;
        else
                ret = 0;

        break;

    case LMCIOCGETLMCSTATS:
            /* For T1 cards, first harvest the framer error counters
             * (read indirectly via MII registers 17/18) into extra_stats. */
            spin_lock_irqsave(&sc->lmc_lock, flags);
            if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
                    sc->extra_stats.framingBitErrorCount +=
                            lmc_mii_readreg(sc, 0, 18) & 0xff;
                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
                    sc->extra_stats.framingBitErrorCount +=
                            (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
                    sc->extra_stats.lineCodeViolationCount +=
                            lmc_mii_readreg(sc, 0, 18) & 0xff;
                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
                    sc->extra_stats.lineCodeViolationCount +=
                            (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
                    regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

                    sc->extra_stats.lossOfFrameCount +=
                            (regVal & T1FRAMER_LOF_MASK) >> 4;
                    sc->extra_stats.changeOfFrameAlignmentCount +=
                            (regVal & T1FRAMER_COFA_MASK) >> 2;
                    sc->extra_stats.severelyErroredFrameCount +=
                            regVal & T1FRAMER_SEF_MASK;
            }
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
            /* Copy out the generic netdev stats followed by the extras. */
            if (copy_to_user(data, &sc->lmc_device->stats,
                             sizeof(sc->lmc_device->stats)) ||
                copy_to_user(data + sizeof(sc->lmc_device->stats),
                             &sc->extra_stats, sizeof(sc->extra_stats)))
                    ret = -EFAULT;
            else
                    ret = 0;
            break;

    case LMCIOCCLEARLMCSTATS:
            if (!capable(CAP_NET_ADMIN)) {
                    ret = -EPERM;
                    break;
            }

            /* Zero both stat blocks, then re-seed the check/version
             * fields that validate the stats layout. */
            spin_lock_irqsave(&sc->lmc_lock, flags);
            memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
            memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
            sc->extra_stats.check = STATCHECK;
            sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
                    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
            sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
            ret = 0;
            break;

    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)){
            ret = -EPERM;
            break;
        }

        /* Circuit type may only be changed while the interface is down. */
        if(dev->flags & IFF_UP){
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
                ret = -EFAULT;
                break;
        }
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;

        break;

    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)){
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
        lmc_running_reset (dev);
        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

#ifdef DEBUG
    case LMCIOCDUMPEVENTLOG:
        /* Copy out the event-log write index, then the log buffer. */
        if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
                ret = -EFAULT;
                break;
        }
        if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
                         sizeof(lmcEventLogBuf)))
                ret = -EFAULT;
        else
                ret = 0;

        break;
#endif /* end ifdef _DBG_EVENTLOG */
    case LMCIOCT1CONTROL: /*fold01*/
        /* Only meaningful on T1 cards; otherwise falls through to the
         * initial -EOPNOTSUPP... actually leaves ret unchanged either way
         * (no T1 control action is implemented here). */
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
            ret = -EOPNOTSUPP;
            break;
        }
        break;
    case LMCIOCXILINX: /*fold01*/
        {
            struct lmc_xilinx_control xc; /*fold02*/

            if (!capable(CAP_NET_ADMIN)){
                ret = -EPERM;
                break;
            }

            /*
             * Stop the transmitter while we restart the hardware
             */
            netif_stop_queue(dev);

            if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
                ret = -EFAULT;
                break;
            }
            switch(xc.command){
            case lmc_xilinx_reset: /*fold02*/
                {
                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    lmc_mii_readreg (sc, 0, 16);

                    /*
                     * Make all of them 0 and make input
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect to reset
                     * that later anyway.
                     */

                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);


                    /*
                     * hold for more than 10 microseconds
                     */
                    udelay(50);

                    sc->lmc_gpio |= LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);


                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /* Reset the framer hardware */
                    sc->lmc_media->set_link_status (sc, 1);
                    sc->lmc_media->set_status (sc, NULL);
//                    lmc_softreset(sc);

                    /* Cycle the DS3 LEDs a few times as a visible sign
                     * that the reset path ran. */
                    {
                        int i;
                        for(i = 0; i < 5; i++){
                            lmc_led_on(sc, LMC_DS3_LED0);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED0);
                            lmc_led_on(sc, LMC_DS3_LED1);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED1);
                            lmc_led_on(sc, LMC_DS3_LED3);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED3);
                            lmc_led_on(sc, LMC_DS3_LED2);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED2);
                        }
                    }
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);



                    ret = 0x0;

                }

                break;
            case lmc_xilinx_load_prom: /*fold02*/
                {
                    int timeout = 500000;
                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    lmc_mii_readreg (sc, 0, 16);

                    /*
                     * Make all of them 0 and make input
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset output
                     */
                    lmc_gpio_mkoutput(sc,  LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect to reset
                     * that later anyway.
                     */

                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);


                    /*
                     * hold for more than 10 microseconds
                     */
                    udelay(50);

                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * busy wait for the chip to reset
                     */
                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        cpu_relax();


                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0x0;


                    break;

                }

            case lmc_xilinx_load: /*fold02*/
                {
                    char *data;
                    int pos;
                    int timeout = 500000;

                    if (!xc.data) {
                            ret = -EINVAL;
                            break;
                    }

                    /* Pull the bitstream in from userspace: one byte per
                     * configuration bit, each byte expected to be 0 or 1. */
                    data = memdup_user(xc.data, xc.len);
                    if (IS_ERR(data)) {
                            ret = PTR_ERR(data);
                            break;
                    }

                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * Clear the Xilinx and start programming from the DEC
                     */

                    /*
                     * Set output as:
                     * Reset: 0 (active)
                     * DP:    0 (active)
                     * Mode:  1
                     *
                     */
                    sc->lmc_gpio = 0x00;
                    sc->lmc_gpio &= ~LMC_GEP_DP;
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    sc->lmc_gpio |=  LMC_GEP_MODE;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Wait at least 10 us 20 to be safe
                     */
                    udelay(50);

                    /*
                     * Clear reset and activate programming lines
                     * Reset: Input
                     * DP:    Input
                     * Clock: Output
                     * Data:  Output
                     * Mode:  Output
                     */
                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Set LOAD, DATA, Clock to 1
                     */
                    sc->lmc_gpio = 0x00;
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    sc->lmc_gpio |= LMC_GEP_DATA;
                    sc->lmc_gpio |= LMC_GEP_CLK;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );

                    /*
                     * busy wait for the chip to reset
                     */
                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        cpu_relax();

                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout);

                    /* Bit-bang each configuration bit over GPIO: set the
                     * DATA line, then pulse CLK low and back high with
                     * MODE held high throughout. */
                    for(pos = 0; pos < xc.len; pos++){
                        switch(data[pos]){
                        case 0:
                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
                            break;
                        case 1:
                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
                            break;
                        default:
                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
                        }
                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
                        udelay(1);

                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
                        udelay(1);
                    }
                    /* INIT low means the FPGA rejected the stream; DP low
                     * means it never signalled "done". */
                    if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
                    }
                    else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
                    }
                    else {
                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
                    }

                    lmc_gpio_mkinput(sc, 0xff);

                    /* Pulse the FIFO reset bit in MII register 16. */
                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    kfree(data);

                    ret = 0;

                    break;
                }
            default: /*fold02*/
                ret = -EBADE;
                break;
            }

            /* Whatever happened above, let traffic flow again. */
            netif_wake_queue(dev);
            sc->lmc_txfull = 0;

        }
        break;
    default:
        break;
    }

    return ret;
}
 618
 619
/*
 * The watchdog process that cruises around: runs once a second (the timer
 * is re-armed at the bottom) entirely under sc->lmc_lock.  It keeps the
 * DEC transmit/receive processes running, detects a stuck transmitter and
 * resets the chip, syncs netif carrier state with the hardware link
 * status, and retries ring/buffer setup that failed earlier.
 */
static void lmc_watchdog(struct timer_list *t) /*fold00*/
{
    lmc_softc_t *sc = from_timer(sc, t, timer);
    struct net_device *dev = sc->lmc_device;
    int link_status;
    u32 ticks;
    unsigned long flags;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* sc->check is a canary written at probe time; bail out if the
     * softc looks trashed. */
    if(sc->check != 0xBEAFCAFE){
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        return;
    }


    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */

    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    /* Device not fully up yet: skip the checks, just re-arm the timer. */
    if (sc->lmc_ok == 0)
        goto kick_timer;

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));

    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout
     * Has the packet xmt vs xmt serviced threshold been exceeded */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)
    {

        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;
    }
    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
             sc->tx_TimeoutInd)
    {
        /* Second consecutive tick with packets sent but no tx interrupt
         * serviced: assume the DEC chip is wedged. */
        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->extra_stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset (dev);


        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);

        /* look at: DSR - 02  for Reg 16
         *                  CTS - 08
         *                  DCD - 10
         *                  RI  - 20
         * for Reg 17
         */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    } else {
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }

    /* --- end time out check ----------------------------------- */


    link_status = sc->lmc_media->get_link_status (sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     * Mark it as down.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        netif_carrier_off(dev);
    }

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
     if (link_status != 0 && sc->last_link_status == 0) {
         printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
         sc->last_link_status = 1;
         /* lmc_reset (sc); Again why reset??? */

         netif_carrier_on(dev);
     }

    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the transmitter to make sure it
     * never stops, even if we run out of mem
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for code that failed
     * and try and fix it as appropriate
     */
    if(sc->failed_ring == 1){
        /*
         * Failed to setup the recv/xmit ring
         * Try again
         */
        sc->failed_ring = 0;
        lmc_softreset(sc);
    }
    if(sc->failed_recv_alloc == 1){
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         * and rebuild them
         */
        sc->failed_recv_alloc = 0;
        lmc_softreset(sc);
    }


    /*
     * remember the timer value
     */
kick_timer:

    ticks = LMC_CSR_READ (sc, csr_gp_timer);
    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer (&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);
}
 776
 777static int lmc_attach(struct net_device *dev, unsigned short encoding,
 778                      unsigned short parity)
 779{
 780        if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
 781                return 0;
 782        return -EINVAL;
 783}
 784
/* Netdev operations: open/stop are LMC-local; transmit and the wandev
 * ioctl are routed through the generic HDLC layer, while LMC-private
 * ioctls land in lmc_siocdevprivate(). */
static const struct net_device_ops lmc_ops = {
        .ndo_open       = lmc_open,
        .ndo_stop       = lmc_close,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_siocwandev = hdlc_ioctl,
        .ndo_siocdevprivate = lmc_siocdevprivate,
        .ndo_tx_timeout = lmc_driver_timeout,
        .ndo_get_stats  = lmc_get_stats,
};
 794
 795static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 796{
 797        lmc_softc_t *sc;
 798        struct net_device *dev;
 799        u16 subdevice;
 800        u16 AdapModelNum;
 801        int err;
 802        static int cards_found;
 803
 804        err = pcim_enable_device(pdev);
 805        if (err) {
 806                printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
 807                return err;
 808        }
 809
 810        err = pci_request_regions(pdev, "lmc");
 811        if (err) {
 812                printk(KERN_ERR "lmc: pci_request_region failed\n");
 813                return err;
 814        }
 815
 816        /*
 817         * Allocate our own device structure
 818         */
 819        sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
 820        if (!sc)
 821                return -ENOMEM;
 822
 823        dev = alloc_hdlcdev(sc);
 824        if (!dev) {
 825                printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
 826                return -ENOMEM;
 827        }
 828
 829
 830        dev->type = ARPHRD_HDLC;
 831        dev_to_hdlc(dev)->xmit = lmc_start_xmit;
 832        dev_to_hdlc(dev)->attach = lmc_attach;
 833        dev->netdev_ops = &lmc_ops;
 834        dev->watchdog_timeo = HZ; /* 1 second */
 835        dev->tx_queue_len = 100;
 836        sc->lmc_device = dev;
 837        sc->name = dev->name;
 838        sc->if_type = LMC_PPP;
 839        sc->check = 0xBEAFCAFE;
 840        dev->base_addr = pci_resource_start(pdev, 0);
 841        dev->irq = pdev->irq;
 842        pci_set_drvdata(pdev, dev);
 843        SET_NETDEV_DEV(dev, &pdev->dev);
 844
 845        /*
 846         * This will get the protocol layer ready and do any 1 time init's
 847         * Must have a valid sc and dev structure
 848         */
 849        lmc_proto_attach(sc);
 850
 851        /* Init the spin lock so can call it latter */
 852
 853        spin_lock_init(&sc->lmc_lock);
 854        pci_set_master(pdev);
 855
 856        printk(KERN_INFO "hdlc: detected at %lx, irq %d\n",
 857               dev->base_addr, dev->irq);
 858
 859        err = register_hdlc_device(dev);
 860        if (err) {
 861                printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
 862                free_netdev(dev);
 863                return err;
 864        }
 865
 866    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
 867    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
 868
 869    /*
 870     *
 871     * Check either the subvendor or the subdevice, some systems reverse
 872     * the setting in the bois, seems to be version and arch dependent?
 873     * Fix the error, exchange the two values 
 874     */
 875    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
 876            subdevice = pdev->subsystem_vendor;
 877
 878    switch (subdevice) {
 879    case PCI_DEVICE_ID_LMC_HSSI:
 880        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
 881        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 882        sc->lmc_media = &lmc_hssi_media;
 883        break;
 884    case PCI_DEVICE_ID_LMC_DS3:
 885        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
 886        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 887        sc->lmc_media = &lmc_ds3_media;
 888        break;
 889    case PCI_DEVICE_ID_LMC_SSI:
 890        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
 891        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 892        sc->lmc_media = &lmc_ssi_media;
 893        break;
 894    case PCI_DEVICE_ID_LMC_T1:
 895        printk(KERN_INFO "%s: LMC T1\n", dev->name);
 896        sc->lmc_cardtype = LMC_CARDTYPE_T1;
 897        sc->lmc_media = &lmc_t1_media;
 898        break;
 899    default:
 900        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 901        unregister_hdlc_device(dev);
 902        return -EIO;
 903        break;
 904    }
 905
 906    lmc_initcsrs (sc, dev->base_addr, 8);
 907
 908    lmc_gpio_mkinput (sc, 0xff);
 909    sc->lmc_gpio = 0;           /* drive no signals yet */
 910
 911    sc->lmc_media->defaults (sc);
 912
 913    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
 914
 915    /* verify that the PCI Sub System ID matches the Adapter Model number
 916     * from the MII register
 917     */
 918    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 919
 920    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
 921         subdevice != PCI_DEVICE_ID_LMC_T1) &&
 922        (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
 923         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
 924        (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
 925         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
 926        (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
 927         subdevice != PCI_DEVICE_ID_LMC_HSSI))
 928            printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
 929                   " Subsystem ID = 0x%04x\n",
 930                   dev->name, AdapModelNum, subdevice);
 931
 932    /*
 933     * reset clock
 934     */
 935    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 936
 937    sc->board_idx = cards_found++;
 938    sc->extra_stats.check = STATCHECK;
 939    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 940            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 941    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 942
 943    sc->lmc_ok = 0;
 944    sc->last_link_status = 0;
 945
 946    return 0;
 947}
 948
 949/*
 950 * Called from pci when removing module.
 951 */
 952static void lmc_remove_one(struct pci_dev *pdev)
 953{
 954        struct net_device *dev = pci_get_drvdata(pdev);
 955
 956        if (dev) {
 957                printk(KERN_DEBUG "%s: removing...\n", dev->name);
 958                unregister_hdlc_device(dev);
 959                free_netdev(dev);
 960        }
 961}
 962
 963/* After this is called, packets can be sent.
 964 * Does not initialize the addresses
 965 */
 966static int lmc_open(struct net_device *dev)
 967{
 968    lmc_softc_t *sc = dev_to_sc(dev);
 969    int err;
 970
 971    lmc_led_on(sc, LMC_DS3_LED0);
 972
 973    lmc_dec_reset(sc);
 974    lmc_reset(sc);
 975
 976    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
 977    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
 978                  lmc_mii_readreg(sc, 0, 17));
 979
 980    if (sc->lmc_ok)
 981        return 0;
 982
 983    lmc_softreset (sc);
 984
 985    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
 986    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
 987        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
 988        return -EAGAIN;
 989    }
 990    sc->got_irq = 1;
 991
 992    /* Assert Terminal Active */
 993    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
 994    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
 995
 996    /*
 997     * reset to last state.
 998     */
 999    sc->lmc_media->set_status (sc, NULL);
1000
1001    /* setup default bits to be used in tulip_desc_t transmit descriptor
1002     * -baz */
1003    sc->TxDescriptControlInit = (
1004                                 LMC_TDES_INTERRUPT_ON_COMPLETION
1005                                 | LMC_TDES_FIRST_SEGMENT
1006                                 | LMC_TDES_LAST_SEGMENT
1007                                 | LMC_TDES_SECOND_ADDR_CHAINED
1008                                 | LMC_TDES_DISABLE_PADDING
1009                                );
1010
1011    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
1012        /* disable 32 bit CRC generated by ASIC */
1013        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
1014    }
1015    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
1016    /* Acknoledge the Terminal Active and light LEDs */
1017
1018    /* dev->flags |= IFF_UP; */
1019
1020    if ((err = lmc_proto_open(sc)) != 0)
1021            return err;
1022
1023    netif_start_queue(dev);
1024    sc->extra_stats.tx_tbusy0++;
1025
1026    /*
1027     * select what interrupts we want to get
1028     */
1029    sc->lmc_intrmask = 0;
1030    /* Should be using the default interrupt mask defined in the .h file. */
1031    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
1032                         | TULIP_STS_RXINTR
1033                         | TULIP_STS_TXINTR
1034                         | TULIP_STS_ABNRMLINTR
1035                         | TULIP_STS_SYSERROR
1036                         | TULIP_STS_TXSTOPPED
1037                         | TULIP_STS_TXUNDERFLOW
1038                         | TULIP_STS_RXSTOPPED
1039                         | TULIP_STS_RXNOBUF
1040                        );
1041    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1042
1043    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
1044    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
1045    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1046
1047    sc->lmc_ok = 1; /* Run watchdog */
1048
1049    /*
1050     * Set the if up now - pfb
1051     */
1052
1053    sc->last_link_status = 1;
1054
1055    /*
1056     * Setup a timer for the watchdog on probe, and start it running.
1057     * Since lmc_ok == 0, it will be a NOP for now.
1058     */
1059    timer_setup(&sc->timer, lmc_watchdog, 0);
1060    sc->timer.expires = jiffies + HZ;
1061    add_timer (&sc->timer);
1062
1063    return 0;
1064}
1065
1066/* Total reset to compensate for the AdTran DSU doing bad things
1067 *  under heavy load
1068 */
1069
/* Full stop-and-restart of the chip while the interface stays
 * administratively up: mask interrupts, reset the 21140 and the card
 * logic, rebuild the descriptor rings, then re-enable interrupts and
 * restart TX/RX DMA.  Called from the interrupt handler on abnormal
 * interrupts and (per the comment above) to recover from misbehaving
 * DSUs under load.
 */
static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

    lmc_dec_reset (sc);
    lmc_reset (sc);
    lmc_softreset (sc);
    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status (sc, 1);
    sc->lmc_media->set_status (sc, NULL);

    netif_wake_queue(dev);

    /* Ring was rebuilt, so it cannot be full any more. */
    sc->lmc_txfull = 0;
    sc->extra_stats.tx_tbusy0++;

    /* Re-enable the default interrupt set and restart TX/RX DMA. */
    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
}
1096
1097
1098/* This is what is called when you ifconfig down a device.
1099 * This disables the timer for the watchdog and keepalives,
1100 * and disables the irq for dev.
1101 */
/* ifconfig down path: mark the interface not-ok, drop the link status,
 * stop the watchdog timer, detach the protocol layer and quiesce the
 * hardware via lmc_ifdown().  Always returns 0.
 */
static int lmc_close(struct net_device *dev)
{
    /* not calling release_region() as we should */
    lmc_softc_t *sc = dev_to_sc(dev);

    /* Clearing lmc_ok first keeps the watchdog from doing any work
     * while we tear things down.
     */
    sc->lmc_ok = 0;
    sc->lmc_media->set_link_status (sc, 0);
    del_timer (&sc->timer);
    lmc_proto_close(sc);
    lmc_ifdown (dev);

    return 0;
}
1115
1116/* Ends the transfer of packets */
1117/* When the interface goes down, this is called */
1118static int lmc_ifdown (struct net_device *dev) /*fold00*/
1119{
1120    lmc_softc_t *sc = dev_to_sc(dev);
1121    u32 csr6;
1122    int i;
1123
1124    /* Don't let anything else go on right now */
1125    //    dev->start = 0;
1126    netif_stop_queue(dev);
1127    sc->extra_stats.tx_tbusy1++;
1128
1129    /* stop interrupts */
1130    /* Clear the interrupt mask */
1131    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1132
1133    /* Stop Tx and Rx on the chip */
1134    csr6 = LMC_CSR_READ (sc, csr_command);
1135    csr6 &= ~LMC_DEC_ST;                /* Turn off the Transmission bit */
1136    csr6 &= ~LMC_DEC_SR;                /* Turn off the Receive bit */
1137    LMC_CSR_WRITE (sc, csr_command, csr6);
1138
1139    sc->lmc_device->stats.rx_missed_errors +=
1140            LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1141
1142    /* release the interrupt */
1143    if(sc->got_irq == 1){
1144        free_irq (dev->irq, dev);
1145        sc->got_irq = 0;
1146    }
1147
1148    /* free skbuffs in the Rx queue */
1149    for (i = 0; i < LMC_RXDESCS; i++)
1150    {
1151        struct sk_buff *skb = sc->lmc_rxq[i];
1152        sc->lmc_rxq[i] = NULL;
1153        sc->lmc_rxring[i].status = 0;
1154        sc->lmc_rxring[i].length = 0;
1155        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
1156        if (skb != NULL)
1157            dev_kfree_skb(skb);
1158        sc->lmc_rxq[i] = NULL;
1159    }
1160
1161    for (i = 0; i < LMC_TXDESCS; i++)
1162    {
1163        if (sc->lmc_txq[i] != NULL)
1164            dev_kfree_skb(sc->lmc_txq[i]);
1165        sc->lmc_txq[i] = NULL;
1166    }
1167
1168    lmc_led_off (sc, LMC_MII16_LED_ALL);
1169
1170    netif_wake_queue(dev);
1171    sc->extra_stats.tx_tbusy0++;
1172
1173    return 0;
1174}
1175
/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
1179static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1180{
1181    struct net_device *dev = (struct net_device *) dev_instance;
1182    lmc_softc_t *sc = dev_to_sc(dev);
1183    u32 csr;
1184    int i;
1185    s32 stat;
1186    unsigned int badtx;
1187    int max_work = LMC_RXDESCS;
1188    int handled = 0;
1189
1190    spin_lock(&sc->lmc_lock);
1191
1192    /*
1193     * Read the csr to find what interrupts we have (if any)
1194     */
1195    csr = LMC_CSR_READ (sc, csr_status);
1196
1197    /*
1198     * Make sure this is our interrupt
1199     */
1200    if ( ! (csr & sc->lmc_intrmask)) {
1201        goto lmc_int_fail_out;
1202    }
1203
1204    /* always go through this loop at least once */
1205    while (csr & sc->lmc_intrmask) {
1206        handled = 1;
1207
1208        /*
1209         * Clear interrupt bits, we handle all case below
1210         */
1211        LMC_CSR_WRITE (sc, csr_status, csr);
1212
1213        /*
1214         * One of
1215         *  - Transmit process timed out CSR5<1>
1216         *  - Transmit jabber timeout    CSR5<3>
1217         *  - Transmit underflow         CSR5<5>
1218         *  - Transmit Receiver buffer unavailable CSR5<7>
1219         *  - Receive process stopped    CSR5<8>
1220         *  - Receive watchdog timeout   CSR5<9>
1221         *  - Early transmit interrupt   CSR5<10>
1222         *
1223         * Is this really right? Should we do a running reset for jabber?
1224         * (being a WAN card and all)
1225         */
1226        if (csr & TULIP_STS_ABNRMLINTR){
1227            lmc_running_reset (dev);
1228            break;
1229        }
1230
1231        if (csr & TULIP_STS_RXINTR)
1232            lmc_rx (dev);
1233
1234        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
1235
1236            int         n_compl = 0 ;
1237            /* reset the transmit timeout detection flag -baz */
1238            sc->extra_stats.tx_NoCompleteCnt = 0;
1239
1240            badtx = sc->lmc_taint_tx;
1241            i = badtx % LMC_TXDESCS;
1242
1243            while ((badtx < sc->lmc_next_tx)) {
1244                stat = sc->lmc_txring[i].status;
1245
1246                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
1247                                                 sc->lmc_txring[i].length);
1248                /*
1249                 * If bit 31 is 1 the tulip owns it break out of the loop
1250                 */
1251                if (stat & 0x80000000)
1252                    break;
1253
1254                n_compl++ ;             /* i.e., have an empty slot in ring */
1255                /*
1256                 * If we have no skbuff or have cleared it
1257                 * Already continue to the next buffer
1258                 */
1259                if (sc->lmc_txq[i] == NULL)
1260                    continue;
1261
1262                /*
1263                 * Check the total error summary to look for any errors
1264                 */
1265                if (stat & 0x8000) {
1266                        sc->lmc_device->stats.tx_errors++;
1267                        if (stat & 0x4104)
1268                                sc->lmc_device->stats.tx_aborted_errors++;
1269                        if (stat & 0x0C00)
1270                                sc->lmc_device->stats.tx_carrier_errors++;
1271                        if (stat & 0x0200)
1272                                sc->lmc_device->stats.tx_window_errors++;
1273                        if (stat & 0x0002)
1274                                sc->lmc_device->stats.tx_fifo_errors++;
1275                } else {
1276                        sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1277
1278                        sc->lmc_device->stats.tx_packets++;
1279                }
1280
1281                dev_consume_skb_irq(sc->lmc_txq[i]);
1282                sc->lmc_txq[i] = NULL;
1283
1284                badtx++;
1285                i = badtx % LMC_TXDESCS;
1286            }
1287
1288            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
1289            {
1290                printk ("%s: out of sync pointer\n", dev->name);
1291                badtx += LMC_TXDESCS;
1292            }
1293            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1294            sc->lmc_txfull = 0;
1295            netif_wake_queue(dev);
1296            sc->extra_stats.tx_tbusy0++;
1297
1298
1299#ifdef DEBUG
1300            sc->extra_stats.dirtyTx = badtx;
1301            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1302            sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1303#endif
1304            sc->lmc_taint_tx = badtx;
1305
1306            /*
1307             * Why was there a break here???
1308             */
1309        }                       /* end handle transmit interrupt */
1310
1311        if (csr & TULIP_STS_SYSERROR) {
1312            u32 error;
1313            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
1314            error = csr>>23 & 0x7;
1315            switch(error){
1316            case 0x000:
1317                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
1318                break;
1319            case 0x001:
1320                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1321                break;
1322            case 0x002:
1323                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1324                break;
1325            default:
1326                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
1327            }
1328            lmc_dec_reset (sc);
1329            lmc_reset (sc);
1330            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1331            LMC_EVENT_LOG(LMC_EVENT_RESET2,
1332                          lmc_mii_readreg (sc, 0, 16),
1333                          lmc_mii_readreg (sc, 0, 17));
1334
1335        }
1336
1337        
1338        if(max_work-- <= 0)
1339            break;
1340        
1341        /*
1342         * Get current csr status to make sure
1343         * we've cleared all interrupts
1344         */
1345        csr = LMC_CSR_READ (sc, csr_status);
1346    }                           /* end interrupt loop */
1347    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
1348
1349lmc_int_fail_out:
1350
1351    spin_unlock(&sc->lmc_lock);
1352
1353    return IRQ_RETVAL(handled);
1354}
1355
/* netdev transmit hook: place @skb into the next free slot of the
 * Tulip TX descriptor ring, decide whether this descriptor should
 * interrupt on completion (in the non-GCOM build only every half-ring
 * does, as interrupt mitigation), hand the descriptor to the chip and
 * poke the transmit poll-demand register.  Runs under lmc_lock with
 * IRQs disabled.  Always returns NETDEV_TX_OK; when the ring fills up
 * the queue is stopped so the stack holds further packets.
 */
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 flag;
    int entry;
    unsigned long flags;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */

    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

#ifndef GCOM
    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
    {
        /* Do not interrupt on completion of this packet */
        flag = 0x60000000;
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
    {
        /* This generates an interrupt on completion of this packet */
        flag = 0xe0000000;
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
    {
        /* Do not interrupt on completion of this packet */
        flag = 0x60000000;
        netif_wake_queue(dev);
    }
    else
    {
        /* This generates an interrupt on completion of this packet */
        flag = 0xe0000000;
        sc->lmc_txfull = 1;
        netif_stop_queue(dev);
    }
#else
    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
    {                           /* ring full, go busy */
        sc->lmc_txfull = 1;
        netif_stop_queue(dev);
        sc->extra_stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }
#endif


    if (entry == LMC_TXDESCS - 1)       /* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING;   /* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
                                                sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */

    sc->extra_stats.tx_NoCompleteCnt++;
    sc->lmc_next_tx++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;

    /* send now! */
    LMC_CSR_WRITE (sc, csr_txpoll, 0);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    return NETDEV_TX_OK;
}
1440
1441
/* Receive path, called from the interrupt handler on RX interrupts.
 *
 * Walks the RX descriptor ring starting at lmc_next_rx, processing
 * every descriptor the chip has handed back (OWN bit clear).  Frames
 * failing the status/length checks are counted and their buffer
 * recycled.  Good frames are passed up via lmc_proto_netif(); large
 * frames are handed up zero-copy and a fresh skb is allocated for the
 * slot, while small frames (<= 3/4 MTU) are copied into a new skb so
 * the ring buffer can be reused directly.  Work is bounded by
 * rx_work_limit.  Always returns 0.
 */
static int lmc_rx(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int i;
    int rx_work_limit = LMC_RXDESCS;
    int rxIntLoopCnt;           /* debug -baz */
    int localLengthErrCnt = 0;
    long stat;
    struct sk_buff *skb, *nsb;
    u16 len;

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0;           /* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;

    /* Process descriptors until we hit one still owned by the chip. */
    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
    {
        rxIntLoopCnt++;         /* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
                if ((stat & 0x0000ffff) != 0x7fff) {
                        /* Oversized frame */
                        sc->lmc_device->stats.rx_length_errors++;
                        goto skip_packet;
                }
        }

        if (stat & 0x00000008) { /* Catch a dribbling bit error */
                sc->lmc_device->stats.rx_errors++;
                sc->lmc_device->stats.rx_frame_errors++;
                goto skip_packet;
        }


        if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
                sc->lmc_device->stats.rx_errors++;
                sc->lmc_device->stats.rx_crc_errors++;
                goto skip_packet;
        }

        /* Frame longer than our buffer - count and drop. */
        if (len > LMC_PKT_BUF_SZ) {
                sc->lmc_device->stats.rx_length_errors++;
                localLengthErrCnt++;
                goto skip_packet;
        }

        /* Too short to even hold the CRC - count and drop. */
        if (len < sc->lmc_crcSize + 2) {
                sc->lmc_device->stats.rx_length_errors++;
                sc->extra_stats.rx_SmallPktCnt++;
                localLengthErrCnt++;
                goto skip_packet;
        }

        if(stat & 0x00004000){
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
        }

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point
         * just allocate an skb buff and continue.
         */

        if (!skb) {
            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
            }
            sc->failed_recv_alloc = 1;
            goto skip_packet;
        }

        sc->lmc_device->stats.rx_packets++;
        sc->lmc_device->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this
         * Packets could be arriving at a constant
         * 44.210mbits/sec and we're going to copy
         * them into a new buffer??
         */

        if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it just hand it up
             */
        give_it_anyways:

            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb_put (skb, len);
            skb->protocol = lmc_proto_type(sc, skb);
            skb_reset_mac_header(skb);
            /* skb_reset_network_header(skb); */
            skb->dev = dev;
            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                /* Transferred to 21140 below */
            }
            else {
                /*
                 * We've run out of memory, stop trying to allocate
                 * memory and exit the interrupt handler
                 *
                 * The chip may run out of receivers and stop
                 * in which care we'll try to allocate the buffer
                 * again.  (once a second)
                 */
                sc->extra_stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;
            }
        }
        else {
            /* Small packet: copy into a right-sized skb so the ring
             * buffer can be handed straight back to the chip.
             */
            nsb = dev_alloc_skb(len);
            if(!nsb) {
                goto give_it_anyways;
            }
            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

            nsb->protocol = lmc_proto_type(sc, nsb);
            skb_reset_mac_header(nsb);
            /* skb_reset_network_header(nsb); */
            nsb->dev = dev;
            lmc_proto_netif(sc, nsb);
        }

    skip_packet:
        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        sc->lmc_next_rx++;
        i = sc->lmc_next_rx % LMC_RXDESCS;
        rx_work_limit--;
        if (rx_work_limit < 0)
            break;
    }

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     *
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->extra_stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
                      sc->extra_stats.rx_BadPktSurgeCnt);
    } */

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
            sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */

#ifdef DEBUG
    if (rxIntLoopCnt == 0)
    {
        for (i = 0; i < LMC_RXDESCS; i++)
        {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4)
            {
                rxIntLoopCnt++;
            }
        }
        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
    }
#endif


    lmc_led_off(sc, LMC_DS3_LED3);

skip_out_of_mem:
    return 0;
}
1633
1634static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1635{
1636    lmc_softc_t *sc = dev_to_sc(dev);
1637    unsigned long flags;
1638
1639    spin_lock_irqsave(&sc->lmc_lock, flags);
1640
1641    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1642
1643    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1644
1645    return &sc->lmc_device->stats;
1646}
1647
/* PCI glue: the probe/remove callbacks and the ID table that bind this
 * driver to LMC cards.  module_pci_driver() generates the module
 * init/exit boilerplate.
 */
static struct pci_driver lmc_driver = {
        .name           = "lmc",
        .id_table       = lmc_pci_tbl,
        .probe          = lmc_init_one,
        .remove         = lmc_remove_one,
};

module_pci_driver(lmc_driver);
1656
1657unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1658{
1659    int i;
1660    int command = (0xf6 << 10) | (devaddr << 5) | regno;
1661    int retval = 0;
1662
1663    LMC_MII_SYNC (sc);
1664
1665    for (i = 15; i >= 0; i--)
1666    {
1667        int dataval = (command & (1 << i)) ? 0x20000 : 0;
1668
1669        LMC_CSR_WRITE (sc, csr_9, dataval);
1670        lmc_delay ();
1671        /* __SLOW_DOWN_IO; */
1672        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
1673        lmc_delay ();
1674        /* __SLOW_DOWN_IO; */
1675    }
1676
1677    for (i = 19; i > 0; i--)
1678    {
1679        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1680        lmc_delay ();
1681        /* __SLOW_DOWN_IO; */
1682        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
1683        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
1684        lmc_delay ();
1685        /* __SLOW_DOWN_IO; */
1686    }
1687
1688    return (retval >> 1) & 0xffff;
1689}
1690
1691void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
1692{
1693    int i = 32;
1694    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
1695
1696    LMC_MII_SYNC (sc);
1697
1698    i = 31;
1699    while (i >= 0)
1700    {
1701        int datav;
1702
1703        if (command & (1 << i))
1704            datav = 0x20000;
1705        else
1706            datav = 0x00000;
1707
1708        LMC_CSR_WRITE (sc, csr_9, datav);
1709        lmc_delay ();
1710        /* __SLOW_DOWN_IO; */
1711        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
1712        lmc_delay ();
1713        /* __SLOW_DOWN_IO; */
1714        i--;
1715    }
1716
1717    i = 2;
1718    while (i > 0)
1719    {
1720        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1721        lmc_delay ();
1722        /* __SLOW_DOWN_IO; */
1723        LMC_CSR_WRITE (sc, csr_9, 0x50000);
1724        lmc_delay ();
1725        /* __SLOW_DOWN_IO; */
1726        i--;
1727    }
1728}
1729
/* Rebuild both DMA descriptor rings and hand their base addresses to
 * the chip.  RX slots are (re)armed with freshly allocated skbs where
 * needed; any skbs still queued for TX are freed and counted as
 * dropped.  If an RX allocation fails the loop stops early and
 * sc->failed_ring is set so the condition can be retried later.
 */
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
    int i;

    /* Initialize the receive rings and buffers. */
    sc->lmc_txfull = 0;
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */

    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
        {
            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if(skb == NULL){
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
                break;
            }
            else{
                sc->lmc_rxq[i] = skb;
            }
        }
        else
        {
            /* Slot already has a buffer from a previous run - reuse it. */
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* use to be tail which is dumb since you're thinking why write
         * to the end of the packj,et but since there's nothing there tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);

    }

    /*
     * Sets end of ring
     */
    if (i != 0) {
        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
    }
    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL){            /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);      /* free it */
            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
    }
    /* Close the TX ring into a circle and give its base to the chip. */
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
}
1809
1810void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1811{
1812    sc->lmc_gpio_io &= ~bits;
1813    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1814}
1815
1816void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1817{
1818    sc->lmc_gpio_io |= bits;
1819    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1820}
1821
1822void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
1823{
1824    if ((~sc->lmc_miireg16) & led) /* Already on! */
1825        return;
1826
1827    sc->lmc_miireg16 &= ~led;
1828    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1829}
1830
1831void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
1832{
1833    if (sc->lmc_miireg16 & led) /* Already set don't do anything */
1834        return;
1835
1836    sc->lmc_miireg16 |= led;
1837    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1838}
1839
/* Reset the board-side logic (FIFO and Xilinx/framer) and re-run the
 * media-specific init.  The sequence of register writes and the delay
 * are hardware-mandated; do not reorder. */
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
    /* Pulse the FIFO reset bit in MII register 16: set, write, clear, write. */
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */
    udelay(50);

    /*
     * stop driving Xilinx-related signals: make RESET an input again
     * so the line is released
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    /* Track how often we reset, for diagnostics. */
    sc->extra_stats.resetCount++;
}
1878
/* Software-reset the DEC 21140 (Tulip) core and program our preferred
 * operating mode into CSR6.  Register write order follows the chip's
 * documented reset procedure; do not reorder. */
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
    u32 val;

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles but at
     * 33MHz that comes to two microseconds but wait a
     * bit longer anyways)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
    udelay(25);
#ifdef __sparc__
    /* NOTE(review): the value read here is immediately discarded by the
     * next assignment — the cached busmode is forced to 0x00100000.
     * Looks intentional/historical for sparc; verify before changing. */
    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
#endif
    /* Start from the chip's current command/mode register (CSR6). */
    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     * We want:
     *   no ethernet address in frames we write
     *   disable padding (txdesc, padding disable)
     *   ignore runt frames (rdes0 bit 15)
     *   no receiver watchdog or transmitter jabber timer
     *       (csr15 bit 0,14 == 1)
     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */

    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
                         | TULIP_CMD_FULLDUPLEX
                         | TULIP_CMD_PASSBADPKT
                         | TULIP_CMD_NOHEARTBEAT
                         | TULIP_CMD_PORTSELECT
                         | TULIP_CMD_RECEIVEALL
                         | TULIP_CMD_MUSTBEONE
                       );
    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
                          | TULIP_CMD_THRESHOLDCTL
                          | TULIP_CMD_STOREFWD
                          | TULIP_CMD_TXTHRSHLDCTL
                        );

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);
}
1938
1939static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
1940                         size_t csr_size)
1941{
1942    sc->lmc_csrs.csr_busmode            = csr_base +  0 * csr_size;
1943    sc->lmc_csrs.csr_txpoll             = csr_base +  1 * csr_size;
1944    sc->lmc_csrs.csr_rxpoll             = csr_base +  2 * csr_size;
1945    sc->lmc_csrs.csr_rxlist             = csr_base +  3 * csr_size;
1946    sc->lmc_csrs.csr_txlist             = csr_base +  4 * csr_size;
1947    sc->lmc_csrs.csr_status             = csr_base +  5 * csr_size;
1948    sc->lmc_csrs.csr_command            = csr_base +  6 * csr_size;
1949    sc->lmc_csrs.csr_intr               = csr_base +  7 * csr_size;
1950    sc->lmc_csrs.csr_missed_frames      = csr_base +  8 * csr_size;
1951    sc->lmc_csrs.csr_9                  = csr_base +  9 * csr_size;
1952    sc->lmc_csrs.csr_10                 = csr_base + 10 * csr_size;
1953    sc->lmc_csrs.csr_11                 = csr_base + 11 * csr_size;
1954    sc->lmc_csrs.csr_12                 = csr_base + 12 * csr_size;
1955    sc->lmc_csrs.csr_13                 = csr_base + 13 * csr_size;
1956    sc->lmc_csrs.csr_14                 = csr_base + 14 * csr_size;
1957    sc->lmc_csrs.csr_15                 = csr_base + 15 * csr_size;
1958}
1959
1960static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
1961{
1962    lmc_softc_t *sc = dev_to_sc(dev);
1963    u32 csr6;
1964    unsigned long flags;
1965
1966    spin_lock_irqsave(&sc->lmc_lock, flags);
1967
1968    printk("%s: Xmitter busy|\n", dev->name);
1969
1970    sc->extra_stats.tx_tbusy_calls++;
1971    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
1972            goto bug_out;
1973
1974    /*
1975     * Chip seems to have locked up
1976     * Reset it
1977     * This whips out all our descriptor
1978     * table and starts from scartch
1979     */
1980
1981    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
1982                  LMC_CSR_READ (sc, csr_status),
1983                  sc->extra_stats.tx_ProcTimeout);
1984
1985    lmc_running_reset (dev);
1986
1987    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1988    LMC_EVENT_LOG(LMC_EVENT_RESET2,
1989                  lmc_mii_readreg (sc, 0, 16),
1990                  lmc_mii_readreg (sc, 0, 17));
1991
1992    /* restart the tx processes */
1993    csr6 = LMC_CSR_READ (sc, csr_command);
1994    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
1995    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
1996
1997    /* immediate transmit */
1998    LMC_CSR_WRITE (sc, csr_txpoll, 0);
1999
2000    sc->lmc_device->stats.tx_errors++;
2001    sc->extra_stats.tx_ProcTimeout++; /* -baz */
2002
2003    netif_trans_update(dev); /* prevent tx timeout */
2004
2005bug_out:
2006
2007    spin_unlock_irqrestore(&sc->lmc_lock, flags);
2008}
2009