linux/drivers/net/wan/lmc/lmc_main.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
   4  * All rights reserved.  www.lanmedia.com
   5  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
   6  *
   7  * This code is written by:
   8  * Andrew Stanley-Jones (asj@cban.com)
   9  * Rob Braun (bbraun@vix.com),
  10  * Michael Graff (explorer@vix.com) and
  11  * Matt Thomas (matt@3am-software.com).
  12  *
  13  * With Help By:
  14  * David Boggs
  15  * Ron Crane
  16  * Alan Cox
  17  *
  18  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
  19  *
  20  * To control link specific options lmcctl is required.
  21  * It can be obtained from ftp.lanmedia.com.
  22  *
  23  * Linux driver notes:
  24  * Linux uses the device struct lmc_private to pass private information
  25  * around.
  26  *
   27  * The initialization portion of this driver consists of the lmc_reset()
   28  * and lmc_dec_reset() functions, as well as the LED controls and the
   29  * lmc_initcsrs() function.
  30  *
  31  * The watchdog function runs every second and checks to see if
  32  * we still have link, and that the timing source is what we expected
  33  * it to be.  If link is lost, the interface is marked down, and
  34  * we no longer can transmit.
  35  */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/string.h>
  40#include <linux/timer.h>
  41#include <linux/ptrace.h>
  42#include <linux/errno.h>
  43#include <linux/ioport.h>
  44#include <linux/slab.h>
  45#include <linux/interrupt.h>
  46#include <linux/pci.h>
  47#include <linux/delay.h>
  48#include <linux/hdlc.h>
  49#include <linux/in.h>
  50#include <linux/if_arp.h>
  51#include <linux/netdevice.h>
  52#include <linux/etherdevice.h>
  53#include <linux/skbuff.h>
  54#include <linux/inet.h>
  55#include <linux/bitops.h>
  56#include <asm/processor.h>             /* Processor type for cache alignment. */
  57#include <asm/io.h>
  58#include <asm/dma.h>
  59#include <linux/uaccess.h>
  60//#include <asm/spinlock.h>
  61
  62#define DRIVER_MAJOR_VERSION     1
  63#define DRIVER_MINOR_VERSION    34
  64#define DRIVER_SUB_VERSION       0
  65
  66#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
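     /*
      * Worked example of the packing above: major 1, minor 34 gives
      * (1 << 8) + 34 = 0x0122.  The same value is later shifted into the
      * upper 16 bits of extra_stats.version_size so userspace can check it.
      */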
  67
  68#include "lmc.h"
  69#include "lmc_var.h"
  70#include "lmc_ioctl.h"
  71#include "lmc_debug.h"
  72#include "lmc_proto.h"
  73
  74static int LMC_PKT_BUF_SZ = 1542;
  75
  76static const struct pci_device_id lmc_pci_tbl[] = {
  77        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
  78          PCI_VENDOR_ID_LMC, PCI_ANY_ID },
  79        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
  80          PCI_ANY_ID, PCI_VENDOR_ID_LMC },
  81        { 0 }
  82};
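     /*
      * Both entries match the DEC 21140 ("Tulip FAST") carrying an LMC
      * subsystem ID; the second entry catches boards whose subsystem vendor
      * and device fields are swapped (see the BIOS note in lmc_init_one()).
      */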
  83
  84MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
  85MODULE_LICENSE("GPL v2");
  86
  87
  88static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
  89                                        struct net_device *dev);
  90static int lmc_rx (struct net_device *dev);
  91static int lmc_open(struct net_device *dev);
  92static int lmc_close(struct net_device *dev);
  93static struct net_device_stats *lmc_get_stats(struct net_device *dev);
  94static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
  95static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
  96static void lmc_softreset(lmc_softc_t * const);
  97static void lmc_running_reset(struct net_device *dev);
  98static int lmc_ifdown(struct net_device * const);
  99static void lmc_watchdog(struct timer_list *t);
 100static void lmc_reset(lmc_softc_t * const sc);
 101static void lmc_dec_reset(lmc_softc_t * const sc);
 102static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
 103
 104/*
  105 * Linux reserves 16 device-specific IOCTLs.  We call them
 106 * LMCIOC* to control various bits of our world.
 107 */
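     /*
      * Rough userspace sketch of how these are invoked (illustrative only;
      * the interface name and socket setup are assumptions, not part of
      * this driver):
      *
      *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
      *     struct ifreq ifr;
      *     lmc_ctl_t ctl;
      *
      *     strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
      *     ifr.ifr_data = (char *)&ctl;
      *     ioctl(fd, LMCIOCGINFO, &ifr);    (fills ctl from sc->ictl)
      */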
 108int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 109{
 110    lmc_softc_t *sc = dev_to_sc(dev);
 111    lmc_ctl_t ctl;
 112    int ret = -EOPNOTSUPP;
 113    u16 regVal;
 114    unsigned long flags;
 115
 116    lmc_trace(dev, "lmc_ioctl in");
 117
 118    /*
  119     * Most functions mess with the structure.
  120     * Disable interrupts while we do the polling.
 121     */
 122
 123    switch (cmd) {
 124        /*
  125         * Return the current driver state.  Since we keep this up
  126         * to date internally, just copy it out to the user.
 127         */
 128    case LMCIOCGINFO: /*fold01*/
 129        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
 130                ret = -EFAULT;
 131        else
 132                ret = 0;
 133        break;
 134
 135    case LMCIOCSINFO: /*fold01*/
 136        if (!capable(CAP_NET_ADMIN)) {
 137            ret = -EPERM;
 138            break;
 139        }
 140
 141        if(dev->flags & IFF_UP){
 142            ret = -EBUSY;
 143            break;
 144        }
 145
 146        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
 147                ret = -EFAULT;
 148                break;
 149        }
 150
 151        spin_lock_irqsave(&sc->lmc_lock, flags);
 152        sc->lmc_media->set_status (sc, &ctl);
 153
 154        if(ctl.crc_length != sc->ictl.crc_length) {
 155            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
 156            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
 157                sc->TxDescriptControlInit |=  LMC_TDES_ADD_CRC_DISABLE;
 158            else
 159                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
 160        }
 161        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 162
 163        ret = 0;
 164        break;
 165
 166    case LMCIOCIFTYPE: /*fold01*/
 167        {
 168            u16 old_type = sc->if_type;
 169            u16 new_type;
 170
 171            if (!capable(CAP_NET_ADMIN)) {
 172                ret = -EPERM;
 173                break;
 174            }
 175
 176            if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
 177                ret = -EFAULT;
 178                break;
 179            }
 180
 181            
 182            if (new_type == old_type)
 183            {
 184                ret = 0 ;
 185                break;                          /* no change */
 186            }
 187            
 188            spin_lock_irqsave(&sc->lmc_lock, flags);
 189            lmc_proto_close(sc);
 190
 191            sc->if_type = new_type;
 192            lmc_proto_attach(sc);
 193            ret = lmc_proto_open(sc);
 194            spin_unlock_irqrestore(&sc->lmc_lock, flags);
 195            break;
 196        }
 197
 198    case LMCIOCGETXINFO: /*fold01*/
 199        spin_lock_irqsave(&sc->lmc_lock, flags);
 200        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
 201
 202        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
 203        sc->lmc_xinfo.PciSlotNumber = 0;
 204        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
 205        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
 206        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
 207        sc->lmc_xinfo.XilinxRevisionNumber =
 208            lmc_mii_readreg (sc, 0, 3) & 0xf;
 209        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
 210        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
 211        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
 212        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 213
 214        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 215
 216        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
 217                         sizeof(struct lmc_xinfo)))
 218                ret = -EFAULT;
 219        else
 220                ret = 0;
 221
 222        break;
 223
 224    case LMCIOCGETLMCSTATS:
 225            spin_lock_irqsave(&sc->lmc_lock, flags);
 226            if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
 227                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
 228                    sc->extra_stats.framingBitErrorCount +=
 229                            lmc_mii_readreg(sc, 0, 18) & 0xff;
 230                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
 231                    sc->extra_stats.framingBitErrorCount +=
 232                            (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
 233                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
 234                    sc->extra_stats.lineCodeViolationCount +=
 235                            lmc_mii_readreg(sc, 0, 18) & 0xff;
 236                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
 237                    sc->extra_stats.lineCodeViolationCount +=
 238                            (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
 239                    lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
 240                    regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
 241
 242                    sc->extra_stats.lossOfFrameCount +=
 243                            (regVal & T1FRAMER_LOF_MASK) >> 4;
 244                    sc->extra_stats.changeOfFrameAlignmentCount +=
 245                            (regVal & T1FRAMER_COFA_MASK) >> 2;
 246                    sc->extra_stats.severelyErroredFrameCount +=
 247                            regVal & T1FRAMER_SEF_MASK;
 248            }
 249            spin_unlock_irqrestore(&sc->lmc_lock, flags);
 250            if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
 251                             sizeof(sc->lmc_device->stats)) ||
 252                copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
 253                             &sc->extra_stats, sizeof(sc->extra_stats)))
 254                    ret = -EFAULT;
 255            else
 256                    ret = 0;
 257            break;
 258
 259    case LMCIOCCLEARLMCSTATS:
 260            if (!capable(CAP_NET_ADMIN)) {
 261                    ret = -EPERM;
 262                    break;
 263            }
 264
 265            spin_lock_irqsave(&sc->lmc_lock, flags);
 266            memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
 267            memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
 268            sc->extra_stats.check = STATCHECK;
 269            sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 270                    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 271            sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 272            spin_unlock_irqrestore(&sc->lmc_lock, flags);
 273            ret = 0;
 274            break;
 275
 276    case LMCIOCSETCIRCUIT: /*fold01*/
 277        if (!capable(CAP_NET_ADMIN)){
 278            ret = -EPERM;
 279            break;
 280        }
 281
 282        if(dev->flags & IFF_UP){
 283            ret = -EBUSY;
 284            break;
 285        }
 286
 287        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
 288                ret = -EFAULT;
 289                break;
 290        }
 291        spin_lock_irqsave(&sc->lmc_lock, flags);
 292        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
 293        sc->ictl.circuit_type = ctl.circuit_type;
 294        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 295        ret = 0;
 296
 297        break;
 298
 299    case LMCIOCRESET: /*fold01*/
 300        if (!capable(CAP_NET_ADMIN)){
 301            ret = -EPERM;
 302            break;
 303        }
 304
 305        spin_lock_irqsave(&sc->lmc_lock, flags);
 306        /* Reset driver and bring back to current state */
 307        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 308        lmc_running_reset (dev);
 309        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 310
 311        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
 312        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 313
 314        ret = 0;
 315        break;
 316
 317#ifdef DEBUG
 318    case LMCIOCDUMPEVENTLOG:
 319        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
 320                ret = -EFAULT;
 321                break;
 322        }
 323        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
 324                         sizeof(lmcEventLogBuf)))
 325                ret = -EFAULT;
 326        else
 327                ret = 0;
 328
 329        break;
  330#endif /* end ifdef DEBUG */
 331    case LMCIOCT1CONTROL: /*fold01*/
 332        if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
 333            ret = -EOPNOTSUPP;
 334            break;
 335        }
 336        break;
 337    case LMCIOCXILINX: /*fold01*/
 338        {
 339            struct lmc_xilinx_control xc; /*fold02*/
 340
 341            if (!capable(CAP_NET_ADMIN)){
 342                ret = -EPERM;
 343                break;
 344            }
 345
 346            /*
  347             * Stop the transmitter while we restart the hardware
 348             */
 349            netif_stop_queue(dev);
 350
 351            if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
 352                ret = -EFAULT;
 353                break;
 354            }
 355            switch(xc.command){
 356            case lmc_xilinx_reset: /*fold02*/
 357                {
 358                    u16 mii;
 359                    spin_lock_irqsave(&sc->lmc_lock, flags);
 360                    mii = lmc_mii_readreg (sc, 0, 16);
 361
 362                    /*
  363                     * Make all the GPIO bits 0 and switch them to inputs
 364                     */
 365                    lmc_gpio_mkinput(sc, 0xff);
 366
 367                    /*
 368                     * make the reset output
 369                     */
 370                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
 371
 372                    /*
 373                     * RESET low to force configuration.  This also forces
 374                     * the transmitter clock to be internal, but we expect to reset
 375                     * that later anyway.
 376                     */
 377
 378                    sc->lmc_gpio &= ~LMC_GEP_RESET;
 379                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 380
 381
 382                    /*
 383                     * hold for more than 10 microseconds
 384                     */
 385                    udelay(50);
 386
 387                    sc->lmc_gpio |= LMC_GEP_RESET;
 388                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 389
 390
 391                    /*
 392                     * stop driving Xilinx-related signals
 393                     */
 394                    lmc_gpio_mkinput(sc, 0xff);
 395
  396                    /* Reset the framer hardware */
 397                    sc->lmc_media->set_link_status (sc, 1);
 398                    sc->lmc_media->set_status (sc, NULL);
 399//                    lmc_softreset(sc);
 400
 401                    {
 402                        int i;
 403                        for(i = 0; i < 5; i++){
 404                            lmc_led_on(sc, LMC_DS3_LED0);
 405                            mdelay(100);
 406                            lmc_led_off(sc, LMC_DS3_LED0);
 407                            lmc_led_on(sc, LMC_DS3_LED1);
 408                            mdelay(100);
 409                            lmc_led_off(sc, LMC_DS3_LED1);
 410                            lmc_led_on(sc, LMC_DS3_LED3);
 411                            mdelay(100);
 412                            lmc_led_off(sc, LMC_DS3_LED3);
 413                            lmc_led_on(sc, LMC_DS3_LED2);
 414                            mdelay(100);
 415                            lmc_led_off(sc, LMC_DS3_LED2);
 416                        }
 417                    }
 418                    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 419                    
 420                    
 421
 422                    ret = 0x0;
 423
 424                }
 425
 426                break;
 427            case lmc_xilinx_load_prom: /*fold02*/
 428                {
 429                    u16 mii;
 430                    int timeout = 500000;
 431                    spin_lock_irqsave(&sc->lmc_lock, flags);
 432                    mii = lmc_mii_readreg (sc, 0, 16);
 433
 434                    /*
  435                     * Make all the GPIO bits 0 and switch them to inputs
 436                     */
 437                    lmc_gpio_mkinput(sc, 0xff);
 438
 439                    /*
 440                     * make the reset output
 441                     */
 442                    lmc_gpio_mkoutput(sc,  LMC_GEP_DP | LMC_GEP_RESET);
 443
 444                    /*
 445                     * RESET low to force configuration.  This also forces
 446                     * the transmitter clock to be internal, but we expect to reset
 447                     * that later anyway.
 448                     */
 449
 450                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
 451                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 452
 453
 454                    /*
 455                     * hold for more than 10 microseconds
 456                     */
 457                    udelay(50);
 458
 459                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
 460                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 461
 462                    /*
 463                     * busy wait for the chip to reset
 464                     */
 465                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
 466                           (timeout-- > 0))
 467                        cpu_relax();
 468
 469
 470                    /*
 471                     * stop driving Xilinx-related signals
 472                     */
 473                    lmc_gpio_mkinput(sc, 0xff);
 474                    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 475
 476                    ret = 0x0;
 477                    
 478
 479                    break;
 480
 481                }
 482
 483            case lmc_xilinx_load: /*fold02*/
 484                {
 485                    char *data;
 486                    int pos;
 487                    int timeout = 500000;
 488
 489                    if (!xc.data) {
 490                            ret = -EINVAL;
 491                            break;
 492                    }
 493
 494                    data = memdup_user(xc.data, xc.len);
 495                    if (IS_ERR(data)) {
 496                            ret = PTR_ERR(data);
 497                            break;
 498                    }
 499
 500                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 501
 502                    spin_lock_irqsave(&sc->lmc_lock, flags);
 503                    lmc_gpio_mkinput(sc, 0xff);
 504
 505                    /*
  506                     * Clear the Xilinx and start programming from the DEC
 507                     */
 508
 509                    /*
  510                     * Set output as:
 511                     * Reset: 0 (active)
 512                     * DP:    0 (active)
 513                     * Mode:  1
 514                     *
 515                     */
 516                    sc->lmc_gpio = 0x00;
 517                    sc->lmc_gpio &= ~LMC_GEP_DP;
 518                    sc->lmc_gpio &= ~LMC_GEP_RESET;
 519                    sc->lmc_gpio |=  LMC_GEP_MODE;
 520                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 521
 522                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
 523
 524                    /*
  525                     * Wait at least 10 us (20 to be safe)
 526                     */
 527                    udelay(50);
 528
 529                    /*
 530                     * Clear reset and activate programming lines
 531                     * Reset: Input
 532                     * DP:    Input
 533                     * Clock: Output
 534                     * Data:  Output
 535                     * Mode:  Output
 536                     */
 537                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
 538
 539                    /*
 540                     * Set LOAD, DATA, Clock to 1
 541                     */
 542                    sc->lmc_gpio = 0x00;
 543                    sc->lmc_gpio |= LMC_GEP_MODE;
 544                    sc->lmc_gpio |= LMC_GEP_DATA;
 545                    sc->lmc_gpio |= LMC_GEP_CLK;
 546                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 547                    
 548                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
 549
 550                    /*
 551                     * busy wait for the chip to reset
 552                     */
 553                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
 554                           (timeout-- > 0))
 555                        cpu_relax();
 556
  557                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
 558
 559                    for(pos = 0; pos < xc.len; pos++){
 560                        switch(data[pos]){
 561                        case 0:
 562                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
 563                            break;
 564                        case 1:
 565                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
 566                            break;
 567                        default:
 568                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
 569                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
 570                        }
 571                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
 572                        sc->lmc_gpio |= LMC_GEP_MODE;
 573                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 574                        udelay(1);
 575                        
  576                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
 577                        sc->lmc_gpio |= LMC_GEP_MODE;
 578                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 579                        udelay(1);
 580                    }
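                         /*
                          * Reading of the checks below, assuming standard Xilinx
                          * slave-serial behaviour: INIT (LMC_GEP_INIT) staying low
                          * after the bitstream has been clocked in indicates a
                          * configuration/CRC error, while DONE (LMC_GEP_DP) staying
                          * low means the load never completed.
                          */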
 581                    if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
 582                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
 583                    }
 584                    else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
 585                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
 586                    }
 587                    else {
 588                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
 589                    }
 590
 591                    lmc_gpio_mkinput(sc, 0xff);
 592                    
 593                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
 594                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
 595
 596                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
 597                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
 598                    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 599
 600                    kfree(data);
 601                    
 602                    ret = 0;
 603                    
 604                    break;
 605                }
 606            default: /*fold02*/
 607                ret = -EBADE;
 608                break;
 609            }
 610
 611            netif_wake_queue(dev);
 612            sc->lmc_txfull = 0;
 613
 614        }
 615        break;
 616    default: /*fold01*/
 617        /* If we don't know what to do, give the protocol a shot. */
 618        ret = lmc_proto_ioctl (sc, ifr, cmd);
 619        break;
 620    }
 621
 622    lmc_trace(dev, "lmc_ioctl out");
 623
 624    return ret;
 625}
 626
 627
 628/* the watchdog process that cruises around */
 629static void lmc_watchdog(struct timer_list *t) /*fold00*/
 630{
 631    lmc_softc_t *sc = from_timer(sc, t, timer);
 632    struct net_device *dev = sc->lmc_device;
 633    int link_status;
 634    u32 ticks;
 635    unsigned long flags;
 636
 637    lmc_trace(dev, "lmc_watchdog in");
 638
 639    spin_lock_irqsave(&sc->lmc_lock, flags);
 640
 641    if(sc->check != 0xBEAFCAFE){
 642        printk("LMC: Corrupt net_device struct, breaking out\n");
 643        spin_unlock_irqrestore(&sc->lmc_lock, flags);
 644        return;
 645    }
 646
 647
 648    /* Make sure the tx jabber and rx watchdog are off,
 649     * and the transmit and receive processes are running.
 650     */
 651
 652    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
 653    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
 654    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
 655
 656    if (sc->lmc_ok == 0)
 657        goto kick_timer;
 658
 659    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
 660
 661    /* --- begin time out check -----------------------------------
 662     * check for a transmit interrupt timeout
  663     * Has the threshold of packets transmitted vs. packets serviced been exceeded? */
 664    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
 665        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
 666        sc->tx_TimeoutInd == 0)
 667    {
 668
 669        /* wait for the watchdog to come around again */
 670        sc->tx_TimeoutInd = 1;
 671    }
 672    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
 673             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
 674             sc->tx_TimeoutInd)
 675    {
 676
 677        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
 678
 679        sc->tx_TimeoutDisplay = 1;
 680        sc->extra_stats.tx_TimeoutCnt++;
 681
 682        /* DEC chip is stuck, hit it with a RESET!!!! */
 683        lmc_running_reset (dev);
 684
 685
 686        /* look at receive & transmit process state to make sure they are running */
 687        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
 688
 689        /* look at: DSR - 02  for Reg 16
 690         *                  CTS - 08
 691         *                  DCD - 10
 692         *                  RI  - 20
 693         * for Reg 17
 694         */
 695        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
 696
 697        /* reset the transmit timeout detection flag */
 698        sc->tx_TimeoutInd = 0;
 699        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
 700        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
 701    } else {
 702        sc->tx_TimeoutInd = 0;
 703        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
 704        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
 705    }
 706
 707    /* --- end time out check ----------------------------------- */
 708
 709
 710    link_status = sc->lmc_media->get_link_status (sc);
 711
 712    /*
 713     * hardware level link lost, but the interface is marked as up.
 714     * Mark it as down.
 715     */
 716    if ((link_status == 0) && (sc->last_link_status != 0)) {
 717        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
 718        sc->last_link_status = 0;
 719        /* lmc_reset (sc); Why reset??? The link can go down ok */
 720
 721        /* Inform the world that link has been lost */
 722        netif_carrier_off(dev);
 723    }
 724
 725    /*
 726     * hardware link is up, but the interface is marked as down.
 727     * Bring it back up again.
 728     */
 729     if (link_status != 0 && sc->last_link_status == 0) {
 730         printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
 731         sc->last_link_status = 1;
 732         /* lmc_reset (sc); Again why reset??? */
 733
 734         netif_carrier_on(dev);
 735     }
 736
 737    /* Call media specific watchdog functions */
 738    sc->lmc_media->watchdog(sc);
 739
 740    /*
 741     * Poke the transmitter to make sure it
 742     * never stops, even if we run out of mem
 743     */
 744    LMC_CSR_WRITE(sc, csr_rxpoll, 0);
 745
 746    /*
 747     * Check for code that failed
  748     * and try to fix it as appropriate
 749     */
 750    if(sc->failed_ring == 1){
 751        /*
  752         * Failed to set up the recv/xmit ring
 753         * Try again
 754         */
 755        sc->failed_ring = 0;
 756        lmc_softreset(sc);
 757    }
 758    if(sc->failed_recv_alloc == 1){
 759        /*
 760         * We failed to alloc mem in the
 761         * interrupt handler, go through the rings
 762         * and rebuild them
 763         */
 764        sc->failed_recv_alloc = 0;
 765        lmc_softreset(sc);
 766    }
 767
 768
 769    /*
 770     * remember the timer value
 771     */
 772kick_timer:
 773
 774    ticks = LMC_CSR_READ (sc, csr_gp_timer);
 775    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
 776    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
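         /*
          * The Tulip general-purpose timer counts down toward zero (only its
          * low 16 bits are used here), so 0xffff minus the value read back
          * approximates how far it ran during the last one-second watchdog
          * interval.
          */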
 777
 778    /*
 779     * restart this timer.
 780     */
 781    sc->timer.expires = jiffies + (HZ);
 782    add_timer (&sc->timer);
 783
 784    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 785
 786    lmc_trace(dev, "lmc_watchdog out");
 787
 788}
 789
 790static int lmc_attach(struct net_device *dev, unsigned short encoding,
 791                      unsigned short parity)
 792{
 793        if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
 794                return 0;
 795        return -EINVAL;
 796}
 797
 798static const struct net_device_ops lmc_ops = {
 799        .ndo_open       = lmc_open,
 800        .ndo_stop       = lmc_close,
 801        .ndo_start_xmit = hdlc_start_xmit,
 802        .ndo_do_ioctl   = lmc_ioctl,
 803        .ndo_tx_timeout = lmc_driver_timeout,
 804        .ndo_get_stats  = lmc_get_stats,
 805};
 806
 807static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 808{
 809        lmc_softc_t *sc;
 810        struct net_device *dev;
 811        u16 subdevice;
 812        u16 AdapModelNum;
 813        int err;
 814        static int cards_found;
 815
 816        /* lmc_trace(dev, "lmc_init_one in"); */
 817
 818        err = pcim_enable_device(pdev);
 819        if (err) {
 820                printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
 821                return err;
 822        }
 823
 824        err = pci_request_regions(pdev, "lmc");
 825        if (err) {
 826                printk(KERN_ERR "lmc: pci_request_region failed\n");
 827                return err;
 828        }
 829
 830        /*
 831         * Allocate our own device structure
 832         */
 833        sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
 834        if (!sc)
 835                return -ENOMEM;
 836
 837        dev = alloc_hdlcdev(sc);
 838        if (!dev) {
 839                printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
 840                return -ENOMEM;
 841        }
 842
 843
 844        dev->type = ARPHRD_HDLC;
 845        dev_to_hdlc(dev)->xmit = lmc_start_xmit;
 846        dev_to_hdlc(dev)->attach = lmc_attach;
 847        dev->netdev_ops = &lmc_ops;
 848        dev->watchdog_timeo = HZ; /* 1 second */
 849        dev->tx_queue_len = 100;
 850        sc->lmc_device = dev;
 851        sc->name = dev->name;
 852        sc->if_type = LMC_PPP;
 853        sc->check = 0xBEAFCAFE;
 854        dev->base_addr = pci_resource_start(pdev, 0);
 855        dev->irq = pdev->irq;
 856        pci_set_drvdata(pdev, dev);
 857        SET_NETDEV_DEV(dev, &pdev->dev);
 858
 859        /*
  860         * This will get the protocol layer ready and do any one-time init.
  861         * Must have a valid sc and dev structure.
 862         */
 863        lmc_proto_attach(sc);
 864
  865        /* Init the spin lock so we can call it later */
 866
 867        spin_lock_init(&sc->lmc_lock);
 868        pci_set_master(pdev);
 869
 870        printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
 871               dev->base_addr, dev->irq);
 872
 873        err = register_hdlc_device(dev);
 874        if (err) {
  875                printk(KERN_ERR "%s: register_hdlc_device failed.\n", dev->name);
 876                free_netdev(dev);
 877                return err;
 878        }
 879
 880    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
 881    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
 882
 883    /*
 884     *
  885     * Check either the subvendor or the subdevice; some systems reverse
  886     * the setting in the BIOS, which seems to be version and arch dependent.
  887     * Fix the error by exchanging the two values.
 888     */
 889    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
 890            subdevice = pdev->subsystem_vendor;
 891
 892    switch (subdevice) {
 893    case PCI_DEVICE_ID_LMC_HSSI:
 894        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
 895        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 896        sc->lmc_media = &lmc_hssi_media;
 897        break;
 898    case PCI_DEVICE_ID_LMC_DS3:
 899        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
 900        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 901        sc->lmc_media = &lmc_ds3_media;
 902        break;
 903    case PCI_DEVICE_ID_LMC_SSI:
 904        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
 905        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 906        sc->lmc_media = &lmc_ssi_media;
 907        break;
 908    case PCI_DEVICE_ID_LMC_T1:
 909        printk(KERN_INFO "%s: LMC T1\n", dev->name);
 910        sc->lmc_cardtype = LMC_CARDTYPE_T1;
 911        sc->lmc_media = &lmc_t1_media;
 912        break;
 913    default:
 914        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 915        break;
 916    }
 917
 918    lmc_initcsrs (sc, dev->base_addr, 8);
 919
 920    lmc_gpio_mkinput (sc, 0xff);
 921    sc->lmc_gpio = 0;           /* drive no signals yet */
 922
 923    sc->lmc_media->defaults (sc);
 924
 925    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
 926
 927    /* verify that the PCI Sub System ID matches the Adapter Model number
 928     * from the MII register
 929     */
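     /*
      * MII register 3 as used by this driver: bits 3:0 hold the Xilinx
      * revision (see LMCIOCGETXINFO above) and bits 9:4 (mask 0x3f0) hold
      * the adapter model number compared against the PCI subsystem ID below.
      */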
 930    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 931
 932    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
 933         subdevice != PCI_DEVICE_ID_LMC_T1) &&
 934        (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
 935         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
 936        (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
 937         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
 938        (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
 939         subdevice != PCI_DEVICE_ID_LMC_HSSI))
 940            printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
 941                   " Subsystem ID = 0x%04x\n",
 942                   dev->name, AdapModelNum, subdevice);
 943
 944    /*
 945     * reset clock
 946     */
 947    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 948
 949    sc->board_idx = cards_found++;
 950    sc->extra_stats.check = STATCHECK;
 951    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 952            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 953    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 954
 955    sc->lmc_ok = 0;
 956    sc->last_link_status = 0;
 957
 958    lmc_trace(dev, "lmc_init_one out");
 959    return 0;
 960}
 961
 962/*
 963 * Called from pci when removing module.
 964 */
 965static void lmc_remove_one(struct pci_dev *pdev)
 966{
 967        struct net_device *dev = pci_get_drvdata(pdev);
 968
 969        if (dev) {
 970                printk(KERN_DEBUG "%s: removing...\n", dev->name);
 971                unregister_hdlc_device(dev);
 972                free_netdev(dev);
 973        }
 974}
 975
 976/* After this is called, packets can be sent.
 977 * Does not initialize the addresses
 978 */
 979static int lmc_open(struct net_device *dev)
 980{
 981    lmc_softc_t *sc = dev_to_sc(dev);
 982    int err;
 983
 984    lmc_trace(dev, "lmc_open in");
 985
 986    lmc_led_on(sc, LMC_DS3_LED0);
 987
 988    lmc_dec_reset(sc);
 989    lmc_reset(sc);
 990
 991    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
 992    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
 993                  lmc_mii_readreg(sc, 0, 17));
 994
 995    if (sc->lmc_ok){
 996        lmc_trace(dev, "lmc_open lmc_ok out");
 997        return 0;
 998    }
 999
1000    lmc_softreset (sc);
1001
1002    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
1003    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
1004        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
1005        lmc_trace(dev, "lmc_open irq failed out");
1006        return -EAGAIN;
1007    }
1008    sc->got_irq = 1;
1009
1010    /* Assert Terminal Active */
1011    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
1012    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
1013
1014    /*
1015     * reset to last state.
1016     */
1017    sc->lmc_media->set_status (sc, NULL);
1018
1019    /* setup default bits to be used in tulip_desc_t transmit descriptor
1020     * -baz */
1021    sc->TxDescriptControlInit = (
1022                                 LMC_TDES_INTERRUPT_ON_COMPLETION
1023                                 | LMC_TDES_FIRST_SEGMENT
1024                                 | LMC_TDES_LAST_SEGMENT
1025                                 | LMC_TDES_SECOND_ADDR_CHAINED
1026                                 | LMC_TDES_DISABLE_PADDING
1027                                );
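         /* These defaults are OR'd into every transmit descriptor's
          * length/flags word in lmc_start_xmit(), which is why a CRC-length
          * change via LMCIOCSINFO also updates LMC_TDES_ADD_CRC_DISABLE
          * (see lmc_ioctl()).
          */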
1028
1029    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
1030        /* disable 32 bit CRC generated by ASIC */
1031        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
1032    }
1033    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
 1034    /* Acknowledge the Terminal Active and light LEDs */
1035
1036    /* dev->flags |= IFF_UP; */
1037
1038    if ((err = lmc_proto_open(sc)) != 0)
1039            return err;
1040
1041    netif_start_queue(dev);
1042    sc->extra_stats.tx_tbusy0++;
1043
1044    /*
1045     * select what interrupts we want to get
1046     */
1047    sc->lmc_intrmask = 0;
1048    /* Should be using the default interrupt mask defined in the .h file. */
1049    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
1050                         | TULIP_STS_RXINTR
1051                         | TULIP_STS_TXINTR
1052                         | TULIP_STS_ABNRMLINTR
1053                         | TULIP_STS_SYSERROR
1054                         | TULIP_STS_TXSTOPPED
1055                         | TULIP_STS_TXUNDERFLOW
1056                         | TULIP_STS_RXSTOPPED
1057                         | TULIP_STS_RXNOBUF
1058                        );
1059    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1060
1061    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
1062    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
1063    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1064
1065    sc->lmc_ok = 1; /* Run watchdog */
1066
1067    /*
1068     * Set the if up now - pfb
1069     */
1070
1071    sc->last_link_status = 1;
1072
1073    /*
 1074     * Set up a timer for the watchdog, and start it running.
 1075     * lmc_ok was set above, so the watchdog is active from its first tick.
1076     */
1077    timer_setup(&sc->timer, lmc_watchdog, 0);
1078    sc->timer.expires = jiffies + HZ;
1079    add_timer (&sc->timer);
1080
1081    lmc_trace(dev, "lmc_open out");
1082
1083    return 0;
1084}
1085
1086/* Total reset to compensate for the AdTran DSU doing bad things
1087 *  under heavy load
1088 */
1089
1090static void lmc_running_reset (struct net_device *dev) /*fold00*/
1091{
1092    lmc_softc_t *sc = dev_to_sc(dev);
1093
1094    lmc_trace(dev, "lmc_running_reset in");
1095
1096    /* stop interrupts */
1097    /* Clear the interrupt mask */
1098    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1099
1100    lmc_dec_reset (sc);
1101    lmc_reset (sc);
1102    lmc_softreset (sc);
1103    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
1104    sc->lmc_media->set_link_status (sc, 1);
1105    sc->lmc_media->set_status (sc, NULL);
1106
1107    netif_wake_queue(dev);
1108
1109    sc->lmc_txfull = 0;
1110    sc->extra_stats.tx_tbusy0++;
1111
1112    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1113    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1114
1115    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
1116    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1117
1118    lmc_trace(dev, "lmc_running_reset_out");
1119}
1120
1121
1122/* This is what is called when you ifconfig down a device.
1123 * This disables the timer for the watchdog and keepalives,
1124 * and disables the irq for dev.
1125 */
1126static int lmc_close(struct net_device *dev)
1127{
1128    /* not calling release_region() as we should */
1129    lmc_softc_t *sc = dev_to_sc(dev);
1130
1131    lmc_trace(dev, "lmc_close in");
1132
1133    sc->lmc_ok = 0;
1134    sc->lmc_media->set_link_status (sc, 0);
1135    del_timer (&sc->timer);
1136    lmc_proto_close(sc);
1137    lmc_ifdown (dev);
1138
1139    lmc_trace(dev, "lmc_close out");
1140
1141    return 0;
1142}
1143
1144/* Ends the transfer of packets */
1145/* When the interface goes down, this is called */
1146static int lmc_ifdown (struct net_device *dev) /*fold00*/
1147{
1148    lmc_softc_t *sc = dev_to_sc(dev);
1149    u32 csr6;
1150    int i;
1151
1152    lmc_trace(dev, "lmc_ifdown in");
1153
1154    /* Don't let anything else go on right now */
1155    //    dev->start = 0;
1156    netif_stop_queue(dev);
1157    sc->extra_stats.tx_tbusy1++;
1158
1159    /* stop interrupts */
1160    /* Clear the interrupt mask */
1161    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1162
1163    /* Stop Tx and Rx on the chip */
1164    csr6 = LMC_CSR_READ (sc, csr_command);
1165    csr6 &= ~LMC_DEC_ST;                /* Turn off the Transmission bit */
1166    csr6 &= ~LMC_DEC_SR;                /* Turn off the Receive bit */
1167    LMC_CSR_WRITE (sc, csr_command, csr6);
1168
1169    sc->lmc_device->stats.rx_missed_errors +=
1170            LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1171
1172    /* release the interrupt */
1173    if(sc->got_irq == 1){
1174        free_irq (dev->irq, dev);
1175        sc->got_irq = 0;
1176    }
1177
1178    /* free skbuffs in the Rx queue */
1179    for (i = 0; i < LMC_RXDESCS; i++)
1180    {
1181        struct sk_buff *skb = sc->lmc_rxq[i];
1182        sc->lmc_rxq[i] = NULL;
1183        sc->lmc_rxring[i].status = 0;
1184        sc->lmc_rxring[i].length = 0;
1185        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
1186        if (skb != NULL)
1187            dev_kfree_skb(skb);
1188        sc->lmc_rxq[i] = NULL;
1189    }
1190
1191    for (i = 0; i < LMC_TXDESCS; i++)
1192    {
1193        if (sc->lmc_txq[i] != NULL)
1194            dev_kfree_skb(sc->lmc_txq[i]);
1195        sc->lmc_txq[i] = NULL;
1196    }
1197
1198    lmc_led_off (sc, LMC_MII16_LED_ALL);
1199
1200    netif_wake_queue(dev);
1201    sc->extra_stats.tx_tbusy0++;
1202
1203    lmc_trace(dev, "lmc_ifdown out");
1204
1205    return 0;
1206}
1207
1208/* Interrupt handling routine.  This will take an incoming packet, or clean
 1209 * up after a transmit.
1210 */
1211static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1212{
1213    struct net_device *dev = (struct net_device *) dev_instance;
1214    lmc_softc_t *sc = dev_to_sc(dev);
1215    u32 csr;
1216    int i;
1217    s32 stat;
1218    unsigned int badtx;
1219    u32 firstcsr;
1220    int max_work = LMC_RXDESCS;
1221    int handled = 0;
1222
1223    lmc_trace(dev, "lmc_interrupt in");
1224
1225    spin_lock(&sc->lmc_lock);
1226
1227    /*
1228     * Read the csr to find what interrupts we have (if any)
1229     */
1230    csr = LMC_CSR_READ (sc, csr_status);
1231
1232    /*
1233     * Make sure this is our interrupt
1234     */
1235    if ( ! (csr & sc->lmc_intrmask)) {
1236        goto lmc_int_fail_out;
1237    }
1238
1239    firstcsr = csr;
1240
1241    /* always go through this loop at least once */
1242    while (csr & sc->lmc_intrmask) {
1243        handled = 1;
1244
1245        /*
 1246         * Clear interrupt bits, we handle all cases below
1247         */
1248        LMC_CSR_WRITE (sc, csr_status, csr);
1249
1250        /*
1251         * One of
1252         *  - Transmit process timed out CSR5<1>
1253         *  - Transmit jabber timeout    CSR5<3>
1254         *  - Transmit underflow         CSR5<5>
 1255         *  - Receive buffer unavailable CSR5<7>
1256         *  - Receive process stopped    CSR5<8>
1257         *  - Receive watchdog timeout   CSR5<9>
1258         *  - Early transmit interrupt   CSR5<10>
1259         *
1260         * Is this really right? Should we do a running reset for jabber?
1261         * (being a WAN card and all)
1262         */
1263        if (csr & TULIP_STS_ABNRMLINTR){
1264            lmc_running_reset (dev);
1265            break;
1266        }
1267        
1268        if (csr & TULIP_STS_RXINTR){
1269            lmc_trace(dev, "rx interrupt");
1270            lmc_rx (dev);
1271            
1272        }
1273        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
1274
1275            int         n_compl = 0 ;
1276            /* reset the transmit timeout detection flag -baz */
1277            sc->extra_stats.tx_NoCompleteCnt = 0;
1278
1279            badtx = sc->lmc_taint_tx;
1280            i = badtx % LMC_TXDESCS;
1281
1282            while ((badtx < sc->lmc_next_tx)) {
1283                stat = sc->lmc_txring[i].status;
1284
1285                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
1286                                                 sc->lmc_txring[i].length);
1287                /*
 1288                 * If bit 31 is 1 the Tulip owns it; break out of the loop
1289                 */
1290                if (stat & 0x80000000)
1291                    break;
1292
1293                n_compl++ ;             /* i.e., have an empty slot in ring */
1294                /*
 1295                 * If we have no skbuff or have already cleared it,
 1296                 * continue to the next buffer
1297                 */
1298                if (sc->lmc_txq[i] == NULL)
1299                    continue;
1300
1301                /*
1302                 * Check the total error summary to look for any errors
1303                 */
1304                if (stat & 0x8000) {
1305                        sc->lmc_device->stats.tx_errors++;
1306                        if (stat & 0x4104)
1307                                sc->lmc_device->stats.tx_aborted_errors++;
1308                        if (stat & 0x0C00)
1309                                sc->lmc_device->stats.tx_carrier_errors++;
1310                        if (stat & 0x0200)
1311                                sc->lmc_device->stats.tx_window_errors++;
1312                        if (stat & 0x0002)
1313                                sc->lmc_device->stats.tx_fifo_errors++;
1314                } else {
1315                        sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1316
1317                        sc->lmc_device->stats.tx_packets++;
1318                }
1319
1320                dev_consume_skb_irq(sc->lmc_txq[i]);
1321                sc->lmc_txq[i] = NULL;
1322
1323                badtx++;
1324                i = badtx % LMC_TXDESCS;
1325            }
1326
1327            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
1328            {
1329                printk ("%s: out of sync pointer\n", dev->name);
1330                badtx += LMC_TXDESCS;
1331            }
1332            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1333            sc->lmc_txfull = 0;
1334            netif_wake_queue(dev);
1335            sc->extra_stats.tx_tbusy0++;
1336
1337
1338#ifdef DEBUG
1339            sc->extra_stats.dirtyTx = badtx;
1340            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1341            sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1342#endif
1343            sc->lmc_taint_tx = badtx;
1344
1345            /*
1346             * Why was there a break here???
1347             */
1348        }                       /* end handle transmit interrupt */
1349
1350        if (csr & TULIP_STS_SYSERROR) {
1351            u32 error;
1352            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
1353            error = csr>>23 & 0x7;
1354            switch(error){
1355            case 0x000:
1356                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
1357                break;
1358            case 0x001:
1359                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1360                break;
1361            case 0x002:
1362                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1363                break;
1364            default:
1365                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
1366            }
1367            lmc_dec_reset (sc);
1368            lmc_reset (sc);
1369            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1370            LMC_EVENT_LOG(LMC_EVENT_RESET2,
1371                          lmc_mii_readreg (sc, 0, 16),
1372                          lmc_mii_readreg (sc, 0, 17));
1373
1374        }
1375
1376        
1377        if(max_work-- <= 0)
1378            break;
1379        
1380        /*
1381         * Get current csr status to make sure
1382         * we've cleared all interrupts
1383         */
1384        csr = LMC_CSR_READ (sc, csr_status);
1385    }                           /* end interrupt loop */
1386    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
1387
1388lmc_int_fail_out:
1389
1390    spin_unlock(&sc->lmc_lock);
1391
1392    lmc_trace(dev, "lmc_interrupt out");
1393    return IRQ_RETVAL(handled);
1394}
1395
1396static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
1397                                        struct net_device *dev)
1398{
1399    lmc_softc_t *sc = dev_to_sc(dev);
1400    u32 flag;
1401    int entry;
1402    unsigned long flags;
1403
1404    lmc_trace(dev, "lmc_start_xmit in");
1405
1406    spin_lock_irqsave(&sc->lmc_lock, flags);
1407
1408    /* normal path, tbusy known to be zero */
1409
1410    entry = sc->lmc_next_tx % LMC_TXDESCS;
1411
1412    sc->lmc_txq[entry] = skb;
1413    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
1414
1415    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
1416
1417#ifndef GCOM
1418    /* If the queue is less than half full, don't interrupt */
1419    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
1420    {
1421        /* Do not interrupt on completion of this packet */
1422        flag = 0x60000000;
1423        netif_wake_queue(dev);
1424    }
1425    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
1426    {
1427        /* This generates an interrupt on completion of this packet */
1428        flag = 0xe0000000;
1429        netif_wake_queue(dev);
1430    }
1431    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
1432    {
1433        /* Do not interrupt on completion of this packet */
1434        flag = 0x60000000;
1435        netif_wake_queue(dev);
1436    }
1437    else
1438    {
1439        /* This generates an interrupt on completion of this packet */
1440        flag = 0xe0000000;
1441        sc->lmc_txfull = 1;
1442        netif_stop_queue(dev);
1443    }
1444#else
1445    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
1446
1447    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1448    {                           /* ring full, go busy */
1449        sc->lmc_txfull = 1;
1450        netif_stop_queue(dev);
1451        sc->extra_stats.tx_tbusy1++;
1452        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1453    }
1454#endif
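         /* In the non-GCOM path above, 0x60000000 sets only the first/last
          * segment bits of the TDES1 flags, while 0xe0000000 also sets
          * interrupt-on-completion, so a Tx completion interrupt is only
          * requested near the half-full and ring-full points rather than for
          * every packet (a simple form of interrupt mitigation).
          */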
1455
1456
1457    if (entry == LMC_TXDESCS - 1)       /* last descriptor in ring */
1458        flag |= LMC_TDES_END_OF_RING;   /* flag as such for Tulip */
1459
1460    /* don't pad small packets either */
1461    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
1462                                                sc->TxDescriptControlInit;
1463
1464    /* set the transmit timeout flag to be checked in
1465     * the watchdog timer handler. -baz
1466     */
1467
1468    sc->extra_stats.tx_NoCompleteCnt++;
1469    sc->lmc_next_tx++;
1470
1471    /* give ownership to the chip */
1472    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
1473    sc->lmc_txring[entry].status = 0x80000000;
1474
1475    /* send now! */
1476    LMC_CSR_WRITE (sc, csr_txpoll, 0);
1477
1478    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1479
1480    lmc_trace(dev, "lmc_start_xmit_out");
1481    return NETDEV_TX_OK;
1482}
1483
1484
1485static int lmc_rx(struct net_device *dev)
1486{
1487    lmc_softc_t *sc = dev_to_sc(dev);
1488    int i;
1489    int rx_work_limit = LMC_RXDESCS;
1490    int rxIntLoopCnt;           /* debug -baz */
1491    int localLengthErrCnt = 0;
1492    long stat;
1493    struct sk_buff *skb, *nsb;
1494    u16 len;
1495
1496    lmc_trace(dev, "lmc_rx in");
1497
1498    lmc_led_on(sc, LMC_DS3_LED3);
1499
1500    rxIntLoopCnt = 0;           /* debug -baz */
1501
1502    i = sc->lmc_next_rx % LMC_RXDESCS;
1503
1504    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
1505    {
1506        rxIntLoopCnt++;         /* debug -baz */
1507        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1508        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
1509                if ((stat & 0x0000ffff) != 0x7fff) {
1510                        /* Oversized frame */
1511                        sc->lmc_device->stats.rx_length_errors++;
1512                        goto skip_packet;
1513                }
1514        }
1515
1516        if (stat & 0x00000008) { /* Catch a dribbling bit error */
1517                sc->lmc_device->stats.rx_errors++;
1518                sc->lmc_device->stats.rx_frame_errors++;
1519                goto skip_packet;
1520        }
1521
1522
1523        if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1524                sc->lmc_device->stats.rx_errors++;
1525                sc->lmc_device->stats.rx_crc_errors++;
1526                goto skip_packet;
1527        }
1528
1529        if (len > LMC_PKT_BUF_SZ) {
1530                sc->lmc_device->stats.rx_length_errors++;
1531                localLengthErrCnt++;
1532                goto skip_packet;
1533        }
1534
1535        if (len < sc->lmc_crcSize + 2) {
1536                sc->lmc_device->stats.rx_length_errors++;
1537                sc->extra_stats.rx_SmallPktCnt++;
1538                localLengthErrCnt++;
1539                goto skip_packet;
1540        }
1541
1542        if(stat & 0x00004000){
1543            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
1544        }
1545
1546        len -= sc->lmc_crcSize;
1547
1548        skb = sc->lmc_rxq[i];
1549
1550        /*
 1551         * We ran out of memory at some point;
 1552         * just allocate an skb and continue.
1553         */
1554        
1555        if (!skb) {
1556            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1557            if (nsb) {
1558                sc->lmc_rxq[i] = nsb;
1559                nsb->dev = dev;
1560                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1561            }
1562            sc->failed_recv_alloc = 1;
1563            goto skip_packet;
1564        }
1565        
1566        sc->lmc_device->stats.rx_packets++;
1567        sc->lmc_device->stats.rx_bytes += len;
1568
1569        LMC_CONSOLE_LOG("recv", skb->data, len);
1570
1571        /*
 1572         * I'm not sure of the sanity of this:
 1573         * packets could be arriving at a constant
1574         * 44.210mbits/sec and we're going to copy
1575         * them into a new buffer??
1576         */
1577        
1578        if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
1579            /*
1580             * If it's a large packet don't copy it just hand it up
1581             */
1582        give_it_anyways:
1583
1584            sc->lmc_rxq[i] = NULL;
1585            sc->lmc_rxring[i].buffer1 = 0x0;
1586
1587            skb_put (skb, len);
1588            skb->protocol = lmc_proto_type(sc, skb);
1589            skb_reset_mac_header(skb);
1590            /* skb_reset_network_header(skb); */
1591            skb->dev = dev;
1592            lmc_proto_netif(sc, skb);
1593
1594            /*
1595             * This skb will be destroyed by the upper layers, make a new one
1596             */
1597            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1598            if (nsb) {
1599                sc->lmc_rxq[i] = nsb;
1600                nsb->dev = dev;
1601                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1602                /* Transferred to 21140 below */
1603            }
1604            else {
1605                /*
1606                 * We've run out of memory, stop trying to allocate
1607                 * memory and exit the interrupt handler
1608                 *
1609                 * The chip may run out of receive buffers and stop,
1610                 * in which case we'll try to allocate the buffer
1611                 * again (once a second).
1612                 */
1613                sc->extra_stats.rx_BuffAllocErr++;
1614                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1615                sc->failed_recv_alloc = 1;
1616                goto skip_out_of_mem;
1617            }
1618        }
1619        else {
1620            nsb = dev_alloc_skb(len);
1621            if(!nsb) {
1622                goto give_it_anyways;
1623            }
1624            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
1625            
1626            nsb->protocol = lmc_proto_type(sc, nsb);
1627            skb_reset_mac_header(nsb);
1628            /* skb_reset_network_header(nsb); */
1629            nsb->dev = dev;
1630            lmc_proto_netif(sc, nsb);
1631        }
1632
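        /*
         * skip_packet: whether we consumed the frame or dropped it, hand
         * the descriptor back to the chip by restoring the OWN bit, then
         * advance to the next ring entry.
         */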
1633    skip_packet:
1634        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1635        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
1636
1637        sc->lmc_next_rx++;
1638        i = sc->lmc_next_rx % LMC_RXDESCS;
1639        rx_work_limit--;
1640        if (rx_work_limit < 0)
1641            break;
1642    }
1643
1644    /* detect condition for LMC1000 where DSU cable attaches and fills
1645     * descriptors with bogus packets
1646     *
1647    if (localLengthErrCnt > LMC_RXDESCS - 3) {
1648        sc->extra_stats.rx_BadPktSurgeCnt++;
1649        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1650                      sc->extra_stats.rx_BadPktSurgeCnt);
1651    } */
1652
1653    /* save max count of receive descriptors serviced */
1654    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1655            sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1656
1657#ifdef DEBUG
1658    if (rxIntLoopCnt == 0)
1659    {
1660        for (i = 0; i < LMC_RXDESCS; i++)
1661        {
1662            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
1663                != DESC_OWNED_BY_DC21X4)
1664            {
1665                rxIntLoopCnt++;
1666            }
1667        }
1668        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
1669    }
1670#endif
1671
1672
1673    lmc_led_off(sc, LMC_DS3_LED3);
1674
1675skip_out_of_mem:
1676
1677    lmc_trace(dev, "lmc_rx out");
1678
1679    return 0;
1680}
1681
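/*
 * Note: csr_missed_frames is the chip's missed-frame counter; only its
 * low 16 bits appear to hold the count, hence the 0xffff mask when it is
 * folded into rx_missed_errors below.
 */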
1682static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1683{
1684    lmc_softc_t *sc = dev_to_sc(dev);
1685    unsigned long flags;
1686
1687    lmc_trace(dev, "lmc_get_stats in");
1688
1689    spin_lock_irqsave(&sc->lmc_lock, flags);
1690
1691    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1692
1693    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1694
1695    lmc_trace(dev, "lmc_get_stats out");
1696
1697    return &sc->lmc_device->stats;
1698}
1699
1700static struct pci_driver lmc_driver = {
1701        .name           = "lmc",
1702        .id_table       = lmc_pci_tbl,
1703        .probe          = lmc_init_one,
1704        .remove         = lmc_remove_one,
1705};
1706
1707module_pci_driver(lmc_driver);
1708
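/*
 * Bit-banged MII read, judging from how CSR9 is driven below: bit 16
 * looks like the MDC clock, bit 17 the outgoing MDIO data, bit 18 a
 * read-mode select and bit 19 the incoming MDIO data.  The 16-bit
 * command word (a fixed start/read-opcode pattern, the 5-bit device
 * address and the 5-bit register number) is clocked out MSB first,
 * then 19 clock cycles shift the result back in, e.g.:
 *
 *   u16 mii16 = lmc_mii_readreg(sc, 0, 16);   /- read MII register 16 -/
 */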
1709unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1710{
1711    int i;
1712    int command = (0xf6 << 10) | (devaddr << 5) | regno;
1713    int retval = 0;
1714
1715    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
1716
1717    LMC_MII_SYNC (sc);
1718
1719    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
1720
1721    for (i = 15; i >= 0; i--)
1722    {
1723        int dataval = (command & (1 << i)) ? 0x20000 : 0;
1724
1725        LMC_CSR_WRITE (sc, csr_9, dataval);
1726        lmc_delay ();
1727        /* __SLOW_DOWN_IO; */
1728        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
1729        lmc_delay ();
1730        /* __SLOW_DOWN_IO; */
1731    }
1732
1733    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
1734
1735    for (i = 19; i > 0; i--)
1736    {
1737        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1738        lmc_delay ();
1739        /* __SLOW_DOWN_IO; */
1740        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
1741        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
1742        lmc_delay ();
1743        /* __SLOW_DOWN_IO; */
1744    }
1745
1746    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
1747
1748    return (retval >> 1) & 0xffff;
1749}
1750
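/*
 * Bit-banged MII write.  The 32-bit command assembled below lines up
 * with a standard MII management write frame: the 0x5002 constant
 * supplies the start (01) and write-opcode (01) bits plus the
 * turnaround (10), devaddr fills bits 27-23, regno bits 22-18, and the
 * 16 data bits sit in the low half.  It is shifted out MSB first on
 * CSR9 just as in lmc_mii_readreg().
 */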
1751void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
1752{
1753    int i = 32;
1754    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
1755
1756    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
1757
1758    LMC_MII_SYNC (sc);
1759
1760    i = 31;
1761    while (i >= 0)
1762    {
1763        int datav;
1764
1765        if (command & (1 << i))
1766            datav = 0x20000;
1767        else
1768            datav = 0x00000;
1769
1770        LMC_CSR_WRITE (sc, csr_9, datav);
1771        lmc_delay ();
1772        /* __SLOW_DOWN_IO; */
1773        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
1774        lmc_delay ();
1775        /* __SLOW_DOWN_IO; */
1776        i--;
1777    }
1778
1779    i = 2;
1780    while (i > 0)
1781    {
1782        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1783        lmc_delay ();
1784        /* __SLOW_DOWN_IO; */
1785        LMC_CSR_WRITE (sc, csr_9, 0x50000);
1786        lmc_delay ();
1787        /* __SLOW_DOWN_IO; */
1788        i--;
1789    }
1790
1791    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
1792}
1793
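/*
 * lmc_softreset() rebuilds both descriptor rings from scratch: each
 * descriptor's buffer2 field is pointed at the following descriptor,
 * the last entry is pointed back at the first, and a receive
 * descriptor is handed to the 21140 by writing 0x80000000 (the OWN
 * bit) into its status word.
 */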
1794static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1795{
1796    int i;
1797
1798    lmc_trace(sc->lmc_device, "lmc_softreset in");
1799
1800    /* Initialize the receive rings and buffers. */
1801    sc->lmc_txfull = 0;
1802    sc->lmc_next_rx = 0;
1803    sc->lmc_next_tx = 0;
1804    sc->lmc_taint_rx = 0;
1805    sc->lmc_taint_tx = 0;
1806
1807    /*
1808     * Setup each one of the receiver buffers
1809     * allocate an skbuff for each one, setup the descriptor table
1810     * and point each buffer at the next one
1811     */
1812
1813    for (i = 0; i < LMC_RXDESCS; i++)
1814    {
1815        struct sk_buff *skb;
1816
1817        if (sc->lmc_rxq[i] == NULL)
1818        {
1819            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1820            if(skb == NULL){
1821                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
1822                sc->failed_ring = 1;
1823                break;
1824            }
1825            else{
1826                sc->lmc_rxq[i] = skb;
1827            }
1828        }
1829        else
1830        {
1831            skb = sc->lmc_rxq[i];
1832        }
1833
1834        skb->dev = sc->lmc_device;
1835
1836        /* owned by 21140 */
1837        sc->lmc_rxring[i].status = 0x80000000;
1838
1839        /* Used to be LMC_PKT_BUF_SZ; use the skb's tailroom instead, since some of the buffer is lost to headroom */
1840        sc->lmc_rxring[i].length = skb_tailroom(skb);
1841
1842        /* This used to use skb->tail, which looks odd (why point at the end of
1843         * the packet?), but nothing has been queued yet, so tail == data anyway.
1844         */
1845        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
1846
1847        /* This is fair since the structure is static and we have the next address */
1848        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
1849
1850    }
1851
1852    /*
1853     * Sets end of ring
1854     */
1855    if (i != 0) {
1856        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
1857        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
1858    }
1859    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
1860
1861    /* Initialize the transmit rings and buffers */
1862    for (i = 0; i < LMC_TXDESCS; i++)
1863    {
1864        if (sc->lmc_txq[i] != NULL){            /* have buffer */
1865            dev_kfree_skb(sc->lmc_txq[i]);      /* free it */
1866            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
1867        }
1868        sc->lmc_txq[i] = NULL;
1869        sc->lmc_txring[i].status = 0x00000000;
1870        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
1871    }
1872    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
1873    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
1874
1875    lmc_trace(sc->lmc_device, "lmc_softreset out");
1876}
1877
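/*
 * The two helpers below keep a shadow copy of the GPIO direction bits
 * in sc->lmc_gpio_io: clearing a bit makes that general-purpose pin an
 * input, setting it makes it an output, and the whole mask is written
 * back with TULIP_GP_PINSET to reprogram the pin directions.
 */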
1878void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1879{
1880    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1881    sc->lmc_gpio_io &= ~bits;
1882    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1883    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1884}
1885
1886void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1887{
1888    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1889    sc->lmc_gpio_io |= bits;
1890    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1891    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1892}
1893
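/*
 * The LED control bits in MII register 16 appear to be active-low:
 * lmc_led_on() clears the requested bit and lmc_led_off() sets it, and
 * both skip the (slow) MII write when the LED is already in the
 * requested state.
 */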
1894void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
1895{
1896    lmc_trace(sc->lmc_device, "lmc_led_on in");
1897    if((~sc->lmc_miireg16) & led){ /* Already on! */
1898        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
1899        return;
1900    }
1901    
1902    sc->lmc_miireg16 &= ~led;
1903    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1904    lmc_trace(sc->lmc_device, "lmc_led_on out");
1905}
1906
1907void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
1908{
1909    lmc_trace(sc->lmc_device, "lmc_led_off in");
1910    if(sc->lmc_miireg16 & led){ /* Bit already set: LED is already off, nothing to do */
1911        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
1912        return;
1913    }
1914    
1915    sc->lmc_miireg16 |= led;
1916    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1917    lmc_trace(sc->lmc_device, "lmc_led_off out");
1918}
1919
1920static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
1921{
1922    lmc_trace(sc->lmc_device, "lmc_reset in");
1923    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
1924    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1925
1926    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
1927    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1928
1929    /*
1930     * make some of the GPIO pins be outputs
1931     */
1932    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
1933
1934    /*
1935     * RESET low to force state reset.  This also forces
1936     * the transmitter clock to be internal, but we expect to reset
1937     * that later anyway.
1938     */
1939    sc->lmc_gpio &= ~(LMC_GEP_RESET);
1940    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
1941
1942    /*
1943     * hold for more than 10 microseconds
1944     */
1945    udelay(50);
1946
1947    /*
1948     * stop driving Xilinx-related signals
1949     */
1950    lmc_gpio_mkinput(sc, LMC_GEP_RESET);
1951
1952    /*
1953     * Call media specific init routine
1954     */
1955    sc->lmc_media->init(sc);
1956
1957    sc->extra_stats.resetCount++;
1958    lmc_trace(sc->lmc_device, "lmc_reset out");
1959}
1960
1961static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
1962{
1963    u32 val;
1964    lmc_trace(sc->lmc_device, "lmc_dec_reset in");
1965
1966    /*
1967     * disable all interrupts
1968     */
1969    sc->lmc_intrmask = 0;
1970    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
1971
1972    /*
1973     * Reset the chip with a software reset command.
1974     * Only 50 PCI cycles are required after the reset (about
1975     * 1.5 microseconds at 33MHz), but wait a bit longer
1976     * anyway.
1977     */
1978    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
1979    udelay(25);
1980#ifdef __sparc__
1981    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
1982    sc->lmc_busmode = 0x00100000;
1983    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
1984    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
1985#endif
1986    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
1987
1988    /*
1989     * We want:
1990     *   no ethernet address in frames we write
1991     *   disable padding (txdesc, padding disable)
1992     *   ignore runt frames (rdes0 bit 15)
1993     *   no receiver watchdog or transmitter jabber timer
1994     *       (csr15 bit 0,14 == 1)
1995     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
1996     */
1997
1998    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
1999                         | TULIP_CMD_FULLDUPLEX
2000                         | TULIP_CMD_PASSBADPKT
2001                         | TULIP_CMD_NOHEARTBEAT
2002                         | TULIP_CMD_PORTSELECT
2003                         | TULIP_CMD_RECEIVEALL
2004                         | TULIP_CMD_MUSTBEONE
2005                       );
2006    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
2007                          | TULIP_CMD_THRESHOLDCTL
2008                          | TULIP_CMD_STOREFWD
2009                          | TULIP_CMD_TXTHRSHLDCTL
2010                        );
2011
2012    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
2013
2014    /*
2015     * disable receiver watchdog and transmit jabber
2016     */
2017    val = LMC_CSR_READ(sc, csr_sia_general);
2018    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
2019    LMC_CSR_WRITE(sc, csr_sia_general, val);
2020
2021    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
2022}
2023
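/*
 * The 21140 CSRs are laid out at a fixed stride within the register
 * window, so CSRn lives at csr_base + n * csr_size; csr_size is
 * whatever stride the probe code passed in for this mapping.
 */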
2024static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
2025                         size_t csr_size)
2026{
2027    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
2028    sc->lmc_csrs.csr_busmode            = csr_base +  0 * csr_size;
2029    sc->lmc_csrs.csr_txpoll             = csr_base +  1 * csr_size;
2030    sc->lmc_csrs.csr_rxpoll             = csr_base +  2 * csr_size;
2031    sc->lmc_csrs.csr_rxlist             = csr_base +  3 * csr_size;
2032    sc->lmc_csrs.csr_txlist             = csr_base +  4 * csr_size;
2033    sc->lmc_csrs.csr_status             = csr_base +  5 * csr_size;
2034    sc->lmc_csrs.csr_command            = csr_base +  6 * csr_size;
2035    sc->lmc_csrs.csr_intr               = csr_base +  7 * csr_size;
2036    sc->lmc_csrs.csr_missed_frames      = csr_base +  8 * csr_size;
2037    sc->lmc_csrs.csr_9                  = csr_base +  9 * csr_size;
2038    sc->lmc_csrs.csr_10                 = csr_base + 10 * csr_size;
2039    sc->lmc_csrs.csr_11                 = csr_base + 11 * csr_size;
2040    sc->lmc_csrs.csr_12                 = csr_base + 12 * csr_size;
2041    sc->lmc_csrs.csr_13                 = csr_base + 13 * csr_size;
2042    sc->lmc_csrs.csr_14                 = csr_base + 14 * csr_size;
2043    sc->lmc_csrs.csr_15                 = csr_base + 15 * csr_size;
2044    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2045}
2046
2047static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
2048{
2049    lmc_softc_t *sc = dev_to_sc(dev);
2050    u32 csr6;
2051    unsigned long flags;
2052
2053    lmc_trace(dev, "lmc_driver_timeout in");
2054
2055    spin_lock_irqsave(&sc->lmc_lock, flags);
2056
2057    printk(KERN_WARNING "%s: Xmitter busy!\n", dev->name);
2058
2059    sc->extra_stats.tx_tbusy_calls++;
2060    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
2061            goto bug_out;
2062
2063    /*
2064     * Chip seems to have locked up
2065     * Reset it
2066     * This wipes out our entire descriptor
2067     * table and starts from scratch.
2068     */
2069
2070    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2071                  LMC_CSR_READ (sc, csr_status),
2072                  sc->extra_stats.tx_ProcTimeout);
2073
2074    lmc_running_reset (dev);
2075
2076    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
2077    LMC_EVENT_LOG(LMC_EVENT_RESET2,
2078                  lmc_mii_readreg (sc, 0, 16),
2079                  lmc_mii_readreg (sc, 0, 17));
2080
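    /*
     * The magic values below look like the Tulip CSR6 start bits:
     * 0x0002 should be "start receive" and 0x2000 "start transmit",
     * so the receiver is brought back up first and then both halves.
     */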
2081    /* restart the tx processes */
2082    csr6 = LMC_CSR_READ (sc, csr_command);
2083    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
2084    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
2085
2086    /* immediate transmit */
2087    LMC_CSR_WRITE (sc, csr_txpoll, 0);
2088
2089    sc->lmc_device->stats.tx_errors++;
2090    sc->extra_stats.tx_ProcTimeout++; /* -baz */
2091
2092    netif_trans_update(dev); /* prevent tx timeout */
2093
2094bug_out:
2095
2096    spin_unlock_irqrestore(&sc->lmc_lock, flags);
2097
2098    lmc_trace(dev, "lmc_driver_timeout out");
2099
2100
2101}
2102