linux/drivers/net/tc35815.c
/*
 * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
 *
 * Based on skeleton.c by Donald Becker.
 *
 * This driver is a replacement for an older, less maintained version.
 * This is the header of the older version:
 *	-----<snip>-----
 *	Copyright 2001 MontaVista Software Inc.
 *	Author: MontaVista Software, Inc.
 *		ahennessy@mvista.com
 *	Copyright (C) 2000-2001 Toshiba Corporation
 *	static const char *version =
 *		"tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
 *	-----<snip>-----
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright TOSHIBA CORPORATION 2004-2005
 * All Rights Reserved.
 */

#ifdef TC35815_NAPI
#define DRV_VERSION     "1.38-NAPI"
#else
#define DRV_VERSION     "1.38"
#endif
static const char *version = "tc35815.c:v" DRV_VERSION "\n";
#define MODNAME                 "tc35815"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/byteorder.h>

/* First, a few definitions that the brave might change. */

#define GATHER_TXINT    /* On-Demand Tx Interrupt */
#define WORKAROUND_LOSTCAR
#define WORKAROUND_100HALF_PROMISC
/* #define TC35815_USE_PACKEDBUFFER */

enum tc35815_chiptype {
        TC35815CF = 0,
        TC35815_NWU,
        TC35815_TX4939,
};

/* indexed by tc35815_chiptype, above */
static const struct {
        const char *name;
} chip_info[] __devinitdata = {
        { "TOSHIBA TC35815CF 10/100BaseTX" },
        { "TOSHIBA TC35815 with Wake on LAN" },
        { "TOSHIBA TC35815/TX4939" },
};

static const struct pci_device_id tc35815_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
        {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
        {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
        {0,}
};
MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);

/* see MODULE_PARM_DESC */
static struct tc35815_options {
        int speed;
        int duplex;
} options;
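
/*
 * Illustrative sketch (not part of the original source shown here):
 * these options would typically be wired up as module parameters, so
 * that "modprobe tc35815 speed=100 duplex=2" forces 100Mbps full
 * duplex.  The value encoding assumed below matches the dropmask logic
 * in tc_mii_probe(): speed is 0 (auto), 10 or 100; duplex is 0 (auto),
 * 1 (half) or 2 (full).
 *
 *	module_param_named(speed, options.speed, int, 0);
 *	MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
 *	module_param_named(duplex, options.duplex, int, 0);
 *	MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
 */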

/*
 * Registers
 */
struct tc35815_regs {
        __u32 DMA_Ctl;          /* 0x00 */
        __u32 TxFrmPtr;
        __u32 TxThrsh;
        __u32 TxPollCtr;
        __u32 BLFrmPtr;
        __u32 RxFragSize;
        __u32 Int_En;
        __u32 FDA_Bas;
        __u32 FDA_Lim;          /* 0x20 */
        __u32 Int_Src;
        __u32 unused0[2];
        __u32 PauseCnt;
        __u32 RemPauCnt;
        __u32 TxCtlFrmStat;
        __u32 unused1;
        __u32 MAC_Ctl;          /* 0x40 */
        __u32 CAM_Ctl;
        __u32 Tx_Ctl;
        __u32 Tx_Stat;
        __u32 Rx_Ctl;
        __u32 Rx_Stat;
        __u32 MD_Data;
        __u32 MD_CA;
        __u32 CAM_Adr;          /* 0x60 */
        __u32 CAM_Data;
        __u32 CAM_Ena;
        __u32 PROM_Ctl;
        __u32 PROM_Data;
        __u32 Algn_Cnt;
        __u32 CRC_Cnt;
        __u32 Miss_Cnt;
};

/*
 * Bit assignments
 */
/* DMA_Ctl bit assign ------------------------------------------------------ */
#define DMA_RxAlign            0x00c00000 /* 1:Reception Alignment           */
#define DMA_RxAlign_1          0x00400000
#define DMA_RxAlign_2          0x00800000
#define DMA_RxAlign_3          0x00c00000
#define DMA_M66EnStat          0x00080000 /* 1:66MHz Enable State            */
#define DMA_IntMask            0x00040000 /* 1:Interrupt mask                */
#define DMA_SWIntReq           0x00020000 /* 1:Software Interrupt request    */
#define DMA_TxWakeUp           0x00010000 /* 1:Transmit Wake Up              */
#define DMA_RxBigE             0x00008000 /* 1:Receive Big Endian            */
#define DMA_TxBigE             0x00004000 /* 1:Transmit Big Endian           */
#define DMA_TestMode           0x00002000 /* 1:Test Mode                     */
#define DMA_PowrMgmnt          0x00001000 /* 1:Power Management              */
#define DMA_DmBurst_Mask       0x000001fc /* DMA Burst size                  */

/* RxFragSize bit assign --------------------------------------------------- */
#define RxFrag_EnPack          0x00008000 /* 1:Enable Packing                */
#define RxFrag_MinFragMask     0x00000ffc /* Minimum Fragment                */

/* MAC_Ctl bit assign ------------------------------------------------------ */
#define MAC_Link10             0x00008000 /* 1:Link Status 10Mbits           */
#define MAC_EnMissRoll         0x00002000 /* 1:Enable Missed Roll            */
#define MAC_MissRoll           0x00000400 /* 1:Missed Roll                   */
#define MAC_Loop10             0x00000080 /* 1:Loop 10 Mbps                  */
#define MAC_Conn_Auto          0x00000000 /*00:Connection mode (Automatic)   */
#define MAC_Conn_10M           0x00000020 /*01:                (10Mbps endec)*/
#define MAC_Conn_Mll           0x00000040 /*10:                (Mll clock)   */
#define MAC_MacLoop            0x00000010 /* 1:MAC Loopback                  */
#define MAC_FullDup            0x00000008 /* 1:Full Duplex 0:Half Duplex     */
#define MAC_Reset              0x00000004 /* 1:Software Reset                */
#define MAC_HaltImm            0x00000002 /* 1:Halt Immediate                */
#define MAC_HaltReq            0x00000001 /* 1:Halt request                  */

/* PROM_Ctl bit assign ----------------------------------------------------- */
#define PROM_Busy              0x00008000 /* 1:Busy (Start Operation)        */
#define PROM_Read              0x00004000 /*10:Read operation                */
#define PROM_Write             0x00002000 /*01:Write operation               */
#define PROM_Erase             0x00006000 /*11:Erase operation               */
                                          /*00:Enable or Disable Writing,    */
                                          /*      as specified in PROM_Addr. */
#define PROM_Addr_Ena          0x00000030 /*11xxxx:PROM Write enable         */
                                          /*00xxxx:           disable        */

/* CAM_Ctl bit assign ------------------------------------------------------ */
#define CAM_CompEn             0x00000010 /* 1:CAM Compare Enable            */
#define CAM_NegCAM             0x00000008 /* 1:Reject packets CAM recognizes,*/
                                          /*                    accept other */
#define CAM_BroadAcc           0x00000004 /* 1:Broadcast accept              */
#define CAM_GroupAcc           0x00000002 /* 1:Multicast accept              */
#define CAM_StationAcc         0x00000001 /* 1:unicast accept                */

/* CAM_Ena bit assign ------------------------------------------------------ */
#define CAM_ENTRY_MAX                  21   /* CAM Data entry max count      */
#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits)  */
#define CAM_Ena_Bit(index)      (1 << (index))
#define CAM_ENTRY_DESTINATION   0
#define CAM_ENTRY_SOURCE        1
#define CAM_ENTRY_MACCTL        20

/* Tx_Ctl bit assign ------------------------------------------------------- */
#define Tx_En                  0x00000001 /* 1:Transmit enable               */
#define Tx_TxHalt              0x00000002 /* 1:Transmit Halt Request         */
#define Tx_NoPad               0x00000004 /* 1:Suppress Padding              */
#define Tx_NoCRC               0x00000008 /* 1:Suppress CRC                  */
#define Tx_FBack               0x00000010 /* 1:Fast Back-off                 */
#define Tx_EnUnder             0x00000100 /* 1:Enable Underrun               */
#define Tx_EnExDefer           0x00000200 /* 1:Enable Excessive Deferral     */
#define Tx_EnLCarr             0x00000400 /* 1:Enable Lost Carrier           */
#define Tx_EnExColl            0x00000800 /* 1:Enable Excessive Collision    */
#define Tx_EnLateColl          0x00001000 /* 1:Enable Late Collision         */
#define Tx_EnTxPar             0x00002000 /* 1:Enable Transmit Parity        */
#define Tx_EnComp              0x00004000 /* 1:Enable Completion             */

/* Tx_Stat bit assign ------------------------------------------------------ */
#define Tx_TxColl_MASK         0x0000000F /* Tx Collision Count              */
#define Tx_ExColl              0x00000010 /* Excessive Collision             */
#define Tx_TXDefer             0x00000020 /* Transmit Deferred               */
#define Tx_Paused              0x00000040 /* Transmit Paused                 */
#define Tx_IntTx               0x00000080 /* Interrupt on Tx                 */
#define Tx_Under               0x00000100 /* Underrun                        */
#define Tx_Defer               0x00000200 /* Deferral                        */
#define Tx_NCarr               0x00000400 /* No Carrier                      */
#define Tx_10Stat              0x00000800 /* 10Mbps Status                   */
#define Tx_LateColl            0x00001000 /* Late Collision                  */
#define Tx_TxPar               0x00002000 /* Tx Parity Error                 */
#define Tx_Comp                0x00004000 /* Completion                      */
#define Tx_Halted              0x00008000 /* Tx Halted                       */
#define Tx_SQErr               0x00010000 /* Signal Quality Error(SQE)       */

/* Rx_Ctl bit assign ------------------------------------------------------- */
#define Rx_EnGood              0x00004000 /* 1:Enable Good                   */
#define Rx_EnRxPar             0x00002000 /* 1:Enable Receive Parity         */
#define Rx_EnLongErr           0x00000800 /* 1:Enable Long Error             */
#define Rx_EnOver              0x00000400 /* 1:Enable OverFlow               */
#define Rx_EnCRCErr            0x00000200 /* 1:Enable CRC Error              */
#define Rx_EnAlign             0x00000100 /* 1:Enable Alignment              */
#define Rx_IgnoreCRC           0x00000040 /* 1:Ignore CRC Value              */
#define Rx_StripCRC            0x00000010 /* 1:Strip CRC Value               */
#define Rx_ShortEn             0x00000008 /* 1:Short Enable                  */
#define Rx_LongEn              0x00000004 /* 1:Long Enable                   */
#define Rx_RxHalt              0x00000002 /* 1:Receive Halt Request          */
#define Rx_RxEn                0x00000001 /* 1:Receive Interrupt Enable      */

/* Rx_Stat bit assign ------------------------------------------------------ */
#define Rx_Halted              0x00008000 /* Rx Halted                       */
#define Rx_Good                0x00004000 /* Rx Good                         */
#define Rx_RxPar               0x00002000 /* Rx Parity Error                 */
#define Rx_TypePkt             0x00001000 /* Rx Type Packet                  */
#define Rx_LongErr             0x00000800 /* Rx Long Error                   */
#define Rx_Over                0x00000400 /* Rx Overflow                     */
#define Rx_CRCErr              0x00000200 /* Rx CRC Error                    */
#define Rx_Align               0x00000100 /* Rx Alignment Error              */
#define Rx_10Stat              0x00000080 /* Rx 10Mbps Status                */
#define Rx_IntRx               0x00000040 /* Rx Interrupt                    */
#define Rx_CtlRecd             0x00000020 /* Rx Control Receive              */
#define Rx_InLenErr            0x00000010 /* Rx In Range Frame Length Error  */

#define Rx_Stat_Mask           0x0000FFF0 /* Rx All Status Mask              */

/* Int_En bit assign ------------------------------------------------------- */
#define Int_NRAbtEn            0x00000800 /* 1:Non-recoverable Abort Enable  */
#define Int_TxCtlCmpEn         0x00000400 /* 1:Transmit Ctl Complete Enable  */
#define Int_DmParErrEn         0x00000200 /* 1:DMA Parity Error Enable       */
#define Int_DParDEn            0x00000100 /* 1:Data Parity Error Enable      */
#define Int_EarNotEn           0x00000080 /* 1:Early Notify Enable           */
#define Int_DParErrEn          0x00000040 /* 1:Detected Parity Error Enable  */
#define Int_SSysErrEn          0x00000020 /* 1:Signalled System Error Enable */
#define Int_RMasAbtEn          0x00000010 /* 1:Received Master Abort Enable  */
#define Int_RTargAbtEn         0x00000008 /* 1:Received Target Abort Enable  */
#define Int_STargAbtEn         0x00000004 /* 1:Signalled Target Abort Enable */
#define Int_BLExEn             0x00000002 /* 1:Buffer List Exhausted Enable  */
#define Int_FDAExEn            0x00000001 /* 1:Free Descriptor Area          */
                                          /*               Exhausted Enable  */

/* Int_Src bit assign ------------------------------------------------------ */
#define Int_NRabt              0x00004000 /* 1:Non Recoverable error         */
#define Int_DmParErrStat       0x00002000 /* 1:DMA Parity Error & Clear      */
#define Int_BLEx               0x00001000 /* 1:Buffer List Empty & Clear     */
#define Int_FDAEx              0x00000800 /* 1:FDA Empty & Clear             */
#define Int_IntNRAbt           0x00000400 /* 1:Non Recoverable Abort         */
#define Int_IntCmp             0x00000200 /* 1:MAC control packet complete   */
#define Int_IntExBD            0x00000100 /* 1:Interrupt Extra BD & Clear    */
#define Int_DmParErr           0x00000080 /* 1:DMA Parity Error & Clear      */
#define Int_IntEarNot          0x00000040 /* 1:Receive Data write & Clear    */
#define Int_SWInt              0x00000020 /* 1:Software request & Clear      */
#define Int_IntBLEx            0x00000010 /* 1:Buffer List Empty & Clear     */
#define Int_IntFDAEx           0x00000008 /* 1:FDA Empty & Clear             */
#define Int_IntPCI             0x00000004 /* 1:PCI controller & Clear        */
#define Int_IntMacRx           0x00000002 /* 1:Rx controller & Clear         */
#define Int_IntMacTx           0x00000001 /* 1:Tx controller & Clear         */

/* MD_CA bit assign -------------------------------------------------------- */
#define MD_CA_PreSup           0x00001000 /* 1:Preamble Suppress             */
#define MD_CA_Busy             0x00000800 /* 1:Busy (Start Operation)        */
#define MD_CA_Wr               0x00000400 /* 1:Write 0:Read                  */


/*
 * Descriptors
 */

/* Frame descriptor */
struct FDesc {
        volatile __u32 FDNext;
        volatile __u32 FDSystem;
        volatile __u32 FDStat;
        volatile __u32 FDCtl;
};

/* Buffer descriptor */
struct BDesc {
        volatile __u32 BuffData;
        volatile __u32 BDCtl;
};

#define FD_ALIGN        16

/* Frame Descriptor bit assign --------------------------------------------- */
#define FD_FDLength_MASK       0x0000FFFF /* Length MASK                     */
#define FD_BDCnt_MASK          0x001F0000 /* BD count MASK in FD             */
#define FD_FrmOpt_MASK         0x7C000000 /* Frame option MASK               */
#define FD_FrmOpt_BigEndian    0x40000000 /* Tx/Rx */
#define FD_FrmOpt_IntTx        0x20000000 /* Tx only */
#define FD_FrmOpt_NoCRC        0x10000000 /* Tx only */
#define FD_FrmOpt_NoPadding    0x08000000 /* Tx only */
#define FD_FrmOpt_Packing      0x04000000 /* Rx only */
#define FD_CownsFD             0x80000000 /* FD Controller owner bit         */
#define FD_Next_EOL            0x00000001 /* FD EOL indicator                */
#define FD_BDCnt_SHIFT         16

/* Buffer Descriptor bit assign -------------------------------------------- */
#define BD_BuffLength_MASK     0x0000FFFF /* Receive Data Size               */
#define BD_RxBDID_MASK         0x00FF0000 /* BD ID Number MASK               */
#define BD_RxBDSeqN_MASK       0x7F000000 /* Rx BD Sequence Number           */
#define BD_CownsBD             0x80000000 /* BD Controller owner bit         */
#define BD_RxBDID_SHIFT        16
#define BD_RxBDSeqN_SHIFT      24
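
/*
 * Illustrative helpers (a sketch, not used by this driver): unpacking
 * the fields of a received BDCtl word with the masks above.
 */
#if 0
static inline unsigned int bd_buff_length(__u32 bdctl)
{
	return bdctl & BD_BuffLength_MASK;	/* bytes in this buffer */
}

static inline unsigned int bd_rxbd_id(__u32 bdctl)
{
	/* BDID is the index into FrFD.bd[], see tc35815_init_queues() */
	return (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
}
#endif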


/* Some useful constants. */
#undef NO_CHECK_CARRIER /* Does not check No-Carrier with TP */

#ifdef NO_CHECK_CARRIER
#define TX_CTL_CMD      (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
        Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
        Tx_En)  /* maybe 0x7b01 */
#else
#define TX_CTL_CMD      (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
        Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
        Tx_En)  /* maybe 0x7b01 */
#endif
/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
#define RX_CTL_CMD      (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
        | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
#define INT_EN_CMD  (Int_NRAbtEn | \
        Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
        Int_SSysErrEn  | Int_RMasAbtEn | Int_RTargAbtEn | \
        Int_STargAbtEn | \
        Int_BLExEn  | Int_FDAExEn) /* maybe 0xb7f*/
#define DMA_CTL_CMD     DMA_BURST_SIZE
#define HAVE_DMA_RXALIGN(lp)    likely((lp)->chiptype != TC35815CF)

/* Tuning parameters */
#define DMA_BURST_SIZE  32
#define TX_THRESHOLD    1024
/* fall-back threshold (max packet size) for buses with low PCI transfer ability */
#define TX_THRESHOLD_MAX 1536
/* switch to the max threshold once underrun errors have occurred this many times */
#define TX_THRESHOLD_KEEP_LIMIT 10

/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
#ifdef TC35815_USE_PACKEDBUFFER
#define FD_PAGE_NUM 2
#define RX_BUF_NUM      8       /* >= 2 */
#define RX_FD_NUM       250     /* >= 32 */
#define TX_FD_NUM       128
#define RX_BUF_SIZE     PAGE_SIZE
#else /* TC35815_USE_PACKEDBUFFER */
#define FD_PAGE_NUM 4
#define RX_BUF_NUM      128     /* < 256 */
#define RX_FD_NUM       256     /* >= 32 */
#define TX_FD_NUM       128
#if RX_CTL_CMD & Rx_LongEn
#define RX_BUF_SIZE     PAGE_SIZE
#elif RX_CTL_CMD & Rx_StripCRC
#define RX_BUF_SIZE     \
        L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
#else
#define RX_BUF_SIZE     \
        L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
#endif
#endif /* TC35815_USE_PACKEDBUFFER */
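
/*
 * Worked example of the constraint above, assuming the non-packed
 * defaults and 4KB pages: 16 + 128*8 + 256*16 + 128*32 = 9232 bytes,
 * which fits within PAGE_SIZE * FD_PAGE_NUM = 16384 bytes.  With a
 * 32-byte L1 cache line, RX_BUF_SIZE in the no-strip-CRC case is
 * L1_CACHE_ALIGN(1514 + 4 + 4 + 2) = 1536 bytes.
 */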
#define RX_FD_RESERVE   (2 / 2) /* max 2 BD per RxFD */
#define NAPI_WEIGHT     16

struct TxFD {
        struct FDesc fd;
        struct BDesc bd;
        struct BDesc unused;
};

struct RxFD {
        struct FDesc fd;
        struct BDesc bd[0];     /* variable length */
};

struct FrFD {
        struct FDesc fd;
        struct BDesc bd[RX_BUF_NUM];
};


#define tc_readl(addr)  ioread32(addr)
#define tc_writel(d, addr)      iowrite32(d, addr)
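
/*
 * Usage sketch (illustrative): registers are reached through the
 * tc35815_regs layout mapped at dev->base_addr, so the offset comments
 * in the struct (MAC_Ctl at 0x40, etc.) line up with the chip's
 * register map:
 *
 *	struct tc35815_regs __iomem *tr =
 *		(struct tc35815_regs __iomem *)dev->base_addr;
 *	u32 ctl = tc_readl(&tr->MAC_Ctl);
 *	tc_writel(ctl | MAC_Reset, &tr->MAC_Ctl);
 */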

#define TC35815_TX_TIMEOUT  msecs_to_jiffies(400)

/* Information that needs to be kept for each controller. */
struct tc35815_local {
        struct pci_dev *pci_dev;

        struct net_device *dev;
        struct napi_struct napi;

        /* statistics */
        struct {
                int max_tx_qlen;
                int tx_ints;
                int rx_ints;
                int tx_underrun;
        } lstats;

        /* Tx control lock.  This protects the transmit buffer ring
         * state along with the "tx full" state of the driver.  This
         * means all netif_queue flow control actions are protected
         * by this lock as well.
         */
        spinlock_t lock;

        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        int duplex;
        int speed;
        int link;
        struct work_struct restart_work;

        /*
         * Transmitting: Batch Mode.
         *      1 BD in 1 TxFD.
         * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER)
         *      1 circular FD for Free Buffer List.
         *      RX_BUF_NUM BD in Free Buffer FD.
         *      One Free Buffer BD has PAGE_SIZE data buffer.
         * Or Non-Packing Mode.
         *      1 circular FD for Free Buffer List.
         *      RX_BUF_NUM BD in Free Buffer FD.
         *      One Free Buffer BD has ETH_FRAME_LEN data buffer.
         */
        void *fd_buf;   /* for TxFD, RxFD, FrFD */
        dma_addr_t fd_buf_dma;
        struct TxFD *tfd_base;
        unsigned int tfd_start;
        unsigned int tfd_end;
        struct RxFD *rfd_base;
        struct RxFD *rfd_limit;
        struct RxFD *rfd_cur;
        struct FrFD *fbl_ptr;
#ifdef TC35815_USE_PACKEDBUFFER
        unsigned char fbl_curid;
        void *data_buf[RX_BUF_NUM];             /* packing */
        dma_addr_t data_buf_dma[RX_BUF_NUM];
        struct {
                struct sk_buff *skb;
                dma_addr_t skb_dma;
        } tx_skbs[TX_FD_NUM];
#else
        unsigned int fbl_count;
        struct {
                struct sk_buff *skb;
                dma_addr_t skb_dma;
        } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
#endif
        u32 msg_enable;
        enum tc35815_chiptype chiptype;
};

static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
{
        return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
}
#ifdef DEBUG
static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
{
        return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
}
#endif
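
/*
 * Usage sketch (illustrative): all descriptors live inside the single
 * fd_buf DMA block, so linking them means translating a kernel virtual
 * pointer into the bus address the controller will follow, e.g.
 *
 *	fd->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, next_fd));
 *
 * which is how the Tx ring is chained in tc35815_init_queues().
 */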
#ifdef TC35815_USE_PACKEDBUFFER
static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
{
        int i;
        for (i = 0; i < RX_BUF_NUM; i++) {
                if (bus >= lp->data_buf_dma[i] &&
                    bus < lp->data_buf_dma[i] + PAGE_SIZE)
                        return (void *)((u8 *)lp->data_buf[i] +
                                        (bus - lp->data_buf_dma[i]));
        }
        return NULL;
}

#define TC35815_DMA_SYNC_ONDEMAND
static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
{
#ifdef TC35815_DMA_SYNC_ONDEMAND
        void *buf;
        /* pci_map + pci_dma_sync will be more efficient than
         * pci_alloc_consistent on some archs. */
        buf = (void *)__get_free_page(GFP_ATOMIC);
        if (!buf)
                return NULL;
        *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                free_page((unsigned long)buf);
                return NULL;
        }
        return buf;
#else
        return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
#endif
}

static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
{
#ifdef TC35815_DMA_SYNC_ONDEMAND
        pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
        free_page((unsigned long)buf);
#else
        pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
#endif
}
#else /* TC35815_USE_PACKEDBUFFER */
static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
                                       struct pci_dev *hwdev,
                                       dma_addr_t *dma_handle)
{
        struct sk_buff *skb;
        skb = dev_alloc_skb(RX_BUF_SIZE);
        if (!skb)
                return NULL;
        *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
                                     PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                dev_kfree_skb_any(skb);
                return NULL;
        }
        skb_reserve(skb, 2);    /* make the IP header 4-byte aligned */
        return skb;
}

static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
{
        pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
                         PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(skb);
}
#endif /* TC35815_USE_PACKEDBUFFER */
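
/*
 * Note on the skb_reserve(skb, 2) above (worked arithmetic): the
 * 14-byte Ethernet header would otherwise leave the IP header on a
 * 2-byte boundary; reserving 2 bytes first gives 2 + 14 = 16, so the
 * IP header starts 4-byte aligned.
 */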

/* Index to functions, as function prototypes. */

static int      tc35815_open(struct net_device *dev);
static int      tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t      tc35815_interrupt(int irq, void *dev_id);
#ifdef TC35815_NAPI
static int      tc35815_rx(struct net_device *dev, int limit);
static int      tc35815_poll(struct napi_struct *napi, int budget);
#else
static void     tc35815_rx(struct net_device *dev);
#endif
static void     tc35815_txdone(struct net_device *dev);
static int      tc35815_close(struct net_device *dev);
static struct   net_device_stats *tc35815_get_stats(struct net_device *dev);
static void     tc35815_set_multicast_list(struct net_device *dev);
static void     tc35815_tx_timeout(struct net_device *dev);
static int      tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void     tc35815_poll_controller(struct net_device *dev);
#endif
static const struct ethtool_ops tc35815_ethtool_ops;

/* Example routines you must write ;->. */
static void     tc35815_chip_reset(struct net_device *dev);
static void     tc35815_chip_init(struct net_device *dev);

#ifdef DEBUG
static void     panic_queues(struct net_device *dev);
#endif

static void tc35815_restart_work(struct work_struct *work);

static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct net_device *dev = bus->priv;
        struct tc35815_regs __iomem *tr =
                (struct tc35815_regs __iomem *)dev->base_addr;
        unsigned long timeout = jiffies + HZ;

        tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
        udelay(12); /* it takes 32 x 400ns at least */
        while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
                if (time_after(jiffies, timeout))
                        return -EIO;
                cpu_relax();
        }
        return tc_readl(&tr->MD_Data) & 0xffff;
}

static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
{
        struct net_device *dev = bus->priv;
        struct tc35815_regs __iomem *tr =
                (struct tc35815_regs __iomem *)dev->base_addr;
        unsigned long timeout = jiffies + HZ;

        tc_writel(val, &tr->MD_Data);
        tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
                  &tr->MD_CA);
        udelay(12); /* it takes 32 x 400ns at least */
        while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
                if (time_after(jiffies, timeout))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}
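
/*
 * Usage sketch (illustrative): once the bus is registered in
 * tc_mii_init(), phylib drives these callbacks; a raw read of PHY 0's
 * status register would look like
 *
 *	int bmsr = tc_mdio_read(lp->mii_bus, 0, MII_BMSR);
 *
 * The MD_CA word packs the PHY address into bits 9:5 and the register
 * number into bits 4:0, and MD_CA_Busy both starts the transaction and
 * reads back as 1 until it completes.
 */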

static void tc_handle_link_change(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        struct phy_device *phydev = lp->phy_dev;
        unsigned long flags;
        int status_change = 0;

        spin_lock_irqsave(&lp->lock, flags);
        if (phydev->link &&
            (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
                struct tc35815_regs __iomem *tr =
                        (struct tc35815_regs __iomem *)dev->base_addr;
                u32 reg;

                reg = tc_readl(&tr->MAC_Ctl);
                reg |= MAC_HaltReq;
                tc_writel(reg, &tr->MAC_Ctl);
                if (phydev->duplex == DUPLEX_FULL)
                        reg |= MAC_FullDup;
                else
                        reg &= ~MAC_FullDup;
                tc_writel(reg, &tr->MAC_Ctl);
                reg &= ~MAC_HaltReq;
                tc_writel(reg, &tr->MAC_Ctl);

                /*
                 * TX4939 PCFG.SPEEDn bit will be changed on
                 * NETDEV_CHANGE event.
                 */

#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
                /*
                 * WORKAROUND: enable LostCrS only in half-duplex
                 * operation.
                 * (TX4939 does not have EnLCarr)
                 */
                if (phydev->duplex == DUPLEX_HALF &&
                    lp->chiptype != TC35815_TX4939)
                        tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
                                  &tr->Tx_Ctl);
#endif

                lp->speed = phydev->speed;
                lp->duplex = phydev->duplex;
                status_change = 1;
        }

        if (phydev->link != lp->link) {
                if (phydev->link) {
#ifdef WORKAROUND_100HALF_PROMISC
                        /* delayed promiscuous enabling */
                        if (dev->flags & IFF_PROMISC)
                                tc35815_set_multicast_list(dev);
#endif
                } else {
                        lp->speed = 0;
                        lp->duplex = -1;
                }
                lp->link = phydev->link;

                status_change = 1;
        }
        spin_unlock_irqrestore(&lp->lock, flags);

        if (status_change && netif_msg_link(lp)) {
                phy_print_status(phydev);
                pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
                         dev->name,
                         phy_read(phydev, MII_BMCR),
                         phy_read(phydev, MII_BMSR),
                         phy_read(phydev, MII_LPA));
        }
}

static int tc_mii_probe(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        int phy_addr;
        u32 dropmask;

        /* find the first phy */
        for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
                if (lp->mii_bus->phy_map[phy_addr]) {
                        if (phydev) {
                                printk(KERN_ERR "%s: multiple PHYs found\n",
                                       dev->name);
                                return -EINVAL;
                        }
                        phydev = lp->mii_bus->phy_map[phy_addr];
                        break;
                }
        }

        if (!phydev) {
                printk(KERN_ERR "%s: no PHY found\n", dev->name);
                return -ENODEV;
        }

        /* attach the mac to the phy */
        phydev = phy_connect(dev, dev_name(&phydev->dev),
                             &tc_handle_link_change, 0,
                             lp->chiptype == TC35815_TX4939 ?
                             PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }
        printk(KERN_INFO "%s: attached PHY driver [%s] "
                "(mii_bus:phy_addr=%s, id=%x)\n",
                dev->name, phydev->drv->name, dev_name(&phydev->dev),
                phydev->phy_id);

        /* mask with MAC supported features */
        phydev->supported &= PHY_BASIC_FEATURES;
        dropmask = 0;
        if (options.speed == 10)
                dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
        else if (options.speed == 100)
                dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
        if (options.duplex == 1)
                dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
        else if (options.duplex == 2)
                dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
        phydev->supported &= ~dropmask;
        phydev->advertising = phydev->supported;

        lp->link = 0;
        lp->speed = 0;
        lp->duplex = -1;
        lp->phy_dev = phydev;

        return 0;
}

static int tc_mii_init(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        int err;
        int i;

        lp->mii_bus = mdiobus_alloc();
        if (lp->mii_bus == NULL) {
                err = -ENOMEM;
                goto err_out;
        }

        lp->mii_bus->name = "tc35815_mii_bus";
        lp->mii_bus->read = tc_mdio_read;
        lp->mii_bus->write = tc_mdio_write;
        snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
                 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
        lp->mii_bus->priv = dev;
        lp->mii_bus->parent = &lp->pci_dev->dev;
        lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
        if (!lp->mii_bus->irq) {
                err = -ENOMEM;
                goto err_out_free_mii_bus;
        }

        for (i = 0; i < PHY_MAX_ADDR; i++)
                lp->mii_bus->irq[i] = PHY_POLL;

        err = mdiobus_register(lp->mii_bus);
        if (err)
                goto err_out_free_mdio_irq;
        err = tc_mii_probe(dev);
        if (err)
                goto err_out_unregister_bus;
        return 0;

err_out_unregister_bus:
        mdiobus_unregister(lp->mii_bus);
err_out_free_mdio_irq:
        kfree(lp->mii_bus->irq);
err_out_free_mii_bus:
        mdiobus_free(lp->mii_bus);
err_out:
        return err;
}

#ifdef CONFIG_CPU_TX49XX
/*
 * Find a platform_device providing a MAC address.  The platform code
 * should provide a "tc35815-mac" device with a MAC address in its
 * platform_data.
 */
static int __devinit tc35815_mac_match(struct device *dev, void *data)
{
        struct platform_device *plat_dev = to_platform_device(dev);
        struct pci_dev *pci_dev = data;
        unsigned int id = pci_dev->irq;
        return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
}

static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        struct device *pd = bus_find_device(&platform_bus_type, NULL,
                                            lp->pci_dev, tc35815_mac_match);
        if (pd) {
                if (pd->platform_data)
                        memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
                put_device(pd);
                return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
        }
        return -ENODEV;
}
#else
static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
{
        return -ENODEV;
}
#endif

static int __devinit tc35815_init_dev_addr(struct net_device *dev)
{
        struct tc35815_regs __iomem *tr =
                (struct tc35815_regs __iomem *)dev->base_addr;
        int i;

        while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
                ;
        for (i = 0; i < 6; i += 2) {
                unsigned short data;
                tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
                while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
                        ;
                data = tc_readl(&tr->PROM_Data);
                dev->dev_addr[i] = data & 0xff;
                dev->dev_addr[i+1] = data >> 8;
        }
        if (!is_valid_ether_addr(dev->dev_addr))
                return tc35815_read_plat_dev_addr(dev);
        return 0;
}
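
/*
 * Worked example (derived from the loop above): the serial EEPROM
 * stores the station address as three little-endian 16-bit words at
 * word offsets 2..4, so a MAC of 00:11:22:33:44:55 reads back as
 * 0x1100, 0x3322, 0x5544.
 */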

static const struct net_device_ops tc35815_netdev_ops = {
        .ndo_open               = tc35815_open,
        .ndo_stop               = tc35815_close,
        .ndo_start_xmit         = tc35815_send_packet,
        .ndo_get_stats          = tc35815_get_stats,
        .ndo_set_multicast_list = tc35815_set_multicast_list,
        .ndo_tx_timeout         = tc35815_tx_timeout,
        .ndo_do_ioctl           = tc35815_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tc35815_poll_controller,
#endif
};

static int __devinit tc35815_init_one(struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
{
        void __iomem *ioaddr = NULL;
        struct net_device *dev;
        struct tc35815_local *lp;
        int rc;

        static int printed_version;
        if (!printed_version++) {
                printk(version);
                dev_printk(KERN_DEBUG, &pdev->dev,
                           "speed:%d duplex:%d\n",
                           options.speed, options.duplex);
        }

        if (!pdev->irq) {
                dev_warn(&pdev->dev, "no IRQ assigned.\n");
                return -ENODEV;
        }

        /* dev zeroed in alloc_etherdev */
        dev = alloc_etherdev(sizeof(*lp));
        if (dev == NULL) {
                dev_err(&pdev->dev, "unable to alloc new ethernet\n");
                return -ENOMEM;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);
        lp = netdev_priv(dev);
        lp->dev = dev;

        /* enable device (incl. PCI PM wakeup), and bus-mastering */
        rc = pcim_enable_device(pdev);
        if (rc)
                goto err_out;
        rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
        if (rc)
                goto err_out;
        pci_set_master(pdev);
        ioaddr = pcim_iomap_table(pdev)[1];

        /* Initialize the device structure. */
        dev->netdev_ops = &tc35815_netdev_ops;
        dev->ethtool_ops = &tc35815_ethtool_ops;
        dev->watchdog_timeo = TC35815_TX_TIMEOUT;
#ifdef TC35815_NAPI
        netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
#endif

        dev->irq = pdev->irq;
        dev->base_addr = (unsigned long)ioaddr;

        INIT_WORK(&lp->restart_work, tc35815_restart_work);
        spin_lock_init(&lp->lock);
        lp->pci_dev = pdev;
        lp->chiptype = ent->driver_data;

        lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
        pci_set_drvdata(pdev, dev);

        /* Soft reset the chip. */
        tc35815_chip_reset(dev);

        /* Retrieve the ethernet address. */
        if (tc35815_init_dev_addr(dev)) {
                dev_warn(&pdev->dev, "not valid ether addr\n");
                random_ether_addr(dev->dev_addr);
        }

        rc = register_netdev(dev);
        if (rc)
                goto err_out;

        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
                dev->name,
                chip_info[ent->driver_data].name,
                dev->base_addr,
                dev->dev_addr,
                dev->irq);

        rc = tc_mii_init(dev);
        if (rc)
                goto err_out_unregister;

        return 0;

err_out_unregister:
        unregister_netdev(dev);
err_out:
        free_netdev(dev);
        return rc;
}


static void __devexit tc35815_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tc35815_local *lp = netdev_priv(dev);

        phy_disconnect(lp->phy_dev);
        mdiobus_unregister(lp->mii_bus);
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
        free_netdev(dev);
        pci_set_drvdata(pdev, NULL);
}

static int
tc35815_init_queues(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        int i;
        unsigned long fd_addr;

        if (!lp->fd_buf) {
                BUG_ON(sizeof(struct FDesc) +
                       sizeof(struct BDesc) * RX_BUF_NUM +
                       sizeof(struct FDesc) * RX_FD_NUM +
                       sizeof(struct TxFD) * TX_FD_NUM >
                       PAGE_SIZE * FD_PAGE_NUM);

                lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
                                                  PAGE_SIZE * FD_PAGE_NUM,
                                                  &lp->fd_buf_dma);
                if (!lp->fd_buf)
                        return -ENOMEM;
                for (i = 0; i < RX_BUF_NUM; i++) {
#ifdef TC35815_USE_PACKEDBUFFER
                        lp->data_buf[i] =
                                alloc_rxbuf_page(lp->pci_dev,
                                                 &lp->data_buf_dma[i]);
                        if (!lp->data_buf[i]) {
                                while (--i >= 0) {
                                        free_rxbuf_page(lp->pci_dev,
                                                        lp->data_buf[i],
                                                        lp->data_buf_dma[i]);
                                        lp->data_buf[i] = NULL;
                                }
                                pci_free_consistent(lp->pci_dev,
                                                    PAGE_SIZE * FD_PAGE_NUM,
                                                    lp->fd_buf,
                                                    lp->fd_buf_dma);
                                lp->fd_buf = NULL;
                                return -ENOMEM;
                        }
#else
                        lp->rx_skbs[i].skb =
                                alloc_rxbuf_skb(dev, lp->pci_dev,
                                                &lp->rx_skbs[i].skb_dma);
                        if (!lp->rx_skbs[i].skb) {
                                while (--i >= 0) {
                                        free_rxbuf_skb(lp->pci_dev,
                                                       lp->rx_skbs[i].skb,
                                                       lp->rx_skbs[i].skb_dma);
                                        lp->rx_skbs[i].skb = NULL;
                                }
                                pci_free_consistent(lp->pci_dev,
                                                    PAGE_SIZE * FD_PAGE_NUM,
                                                    lp->fd_buf,
                                                    lp->fd_buf_dma);
                                lp->fd_buf = NULL;
                                return -ENOMEM;
                        }
#endif
                }
                printk(KERN_DEBUG "%s: FD buf %p",
                       dev->name, lp->fd_buf);
#ifdef TC35815_USE_PACKEDBUFFER
                printk(" DataBuf");
                for (i = 0; i < RX_BUF_NUM; i++)
                        printk(" %p", lp->data_buf[i]);
#endif
                printk("\n");
        } else {
                for (i = 0; i < FD_PAGE_NUM; i++)
                        clear_page((void *)((unsigned long)lp->fd_buf +
                                            i * PAGE_SIZE));
        }
        fd_addr = (unsigned long)lp->fd_buf;

        /* Free Descriptors (for Receive) */
        lp->rfd_base = (struct RxFD *)fd_addr;
        fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
        for (i = 0; i < RX_FD_NUM; i++)
                lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
        lp->rfd_cur = lp->rfd_base;
        lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);

        /* Transmit Descriptors */
        lp->tfd_base = (struct TxFD *)fd_addr;
        fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
        for (i = 0; i < TX_FD_NUM; i++) {
                lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
                lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
                lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
        }
        lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
        lp->tfd_start = 0;
        lp->tfd_end = 0;

        /* Buffer List (for Receive) */
        lp->fbl_ptr = (struct FrFD *)fd_addr;
        lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
        lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
#ifndef TC35815_USE_PACKEDBUFFER
        /*
         * move all allocated skbs to head of rx_skbs[] array.
         * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
         * tc35815_rx() had failed.
         */
        lp->fbl_count = 0;
        for (i = 0; i < RX_BUF_NUM; i++) {
                if (lp->rx_skbs[i].skb) {
                        if (i != lp->fbl_count) {
                                lp->rx_skbs[lp->fbl_count].skb =
                                        lp->rx_skbs[i].skb;
                                lp->rx_skbs[lp->fbl_count].skb_dma =
                                        lp->rx_skbs[i].skb_dma;
                        }
                        lp->fbl_count++;
                }
        }
#endif
        for (i = 0; i < RX_BUF_NUM; i++) {
#ifdef TC35815_USE_PACKEDBUFFER
                lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
#else
                if (i >= lp->fbl_count) {
                        lp->fbl_ptr->bd[i].BuffData = 0;
                        lp->fbl_ptr->bd[i].BDCtl = 0;
                        continue;
                }
                lp->fbl_ptr->bd[i].BuffData =
                        cpu_to_le32(lp->rx_skbs[i].skb_dma);
#endif
                /* BDID is index of FrFD.bd[] */
                lp->fbl_ptr->bd[i].BDCtl =
                        cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
                                    RX_BUF_SIZE);
        }
#ifdef TC35815_USE_PACKEDBUFFER
        lp->fbl_curid = 0;
#endif

        printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
               dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
        return 0;
}
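
/*
 * Resulting fd_buf layout (a sketch derived from the code above):
 *
 *	fd_buf: [RxFD x RX_FD_NUM][TxFD x TX_FD_NUM][FrFD]
 *
 * The Tx ring is circular (the last FDNext points back to entry 0),
 * and FrFD is a single self-linked frame descriptor carrying the
 * RX_BUF_NUM free receive buffer descriptors.
 */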

static void
tc35815_clear_queues(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        int i;

        for (i = 0; i < TX_FD_NUM; i++) {
                u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
                struct sk_buff *skb =
                        fdsystem != 0xffffffff ?
                        lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
                if (lp->tx_skbs[i].skb != skb) {
                        printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
                        panic_queues(dev);
                }
#else
                BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
                if (skb) {
                        pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
                        lp->tx_skbs[i].skb = NULL;
                        lp->tx_skbs[i].skb_dma = 0;
                        dev_kfree_skb_any(skb);
                }
                lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
        }

        tc35815_init_queues(dev);
}

static void
tc35815_free_queues(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        int i;

        if (lp->tfd_base) {
                for (i = 0; i < TX_FD_NUM; i++) {
                        u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
                        struct sk_buff *skb =
                                fdsystem != 0xffffffff ?
                                lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
                        if (lp->tx_skbs[i].skb != skb) {
                                printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
                                panic_queues(dev);
                        }
#else
                        BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
                        if (skb) {
                                /* unmap first: skb->len must be read
                                 * before the skb is freed */
                                pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
                                lp->tx_skbs[i].skb = NULL;
                                lp->tx_skbs[i].skb_dma = 0;
                                dev_kfree_skb(skb);
                        }
                        lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
                }
        }

        lp->rfd_base = NULL;
        lp->rfd_limit = NULL;
        lp->rfd_cur = NULL;
        lp->fbl_ptr = NULL;

        for (i = 0; i < RX_BUF_NUM; i++) {
#ifdef TC35815_USE_PACKEDBUFFER
                if (lp->data_buf[i]) {
                        free_rxbuf_page(lp->pci_dev,
                                        lp->data_buf[i], lp->data_buf_dma[i]);
                        lp->data_buf[i] = NULL;
                }
#else
                if (lp->rx_skbs[i].skb) {
                        free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
                                       lp->rx_skbs[i].skb_dma);
                        lp->rx_skbs[i].skb = NULL;
                }
#endif
        }
        if (lp->fd_buf) {
                pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
                                    lp->fd_buf, lp->fd_buf_dma);
                lp->fd_buf = NULL;
        }
}

static void
dump_txfd(struct TxFD *fd)
{
        printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
               le32_to_cpu(fd->fd.FDNext),
               le32_to_cpu(fd->fd.FDSystem),
               le32_to_cpu(fd->fd.FDStat),
               le32_to_cpu(fd->fd.FDCtl));
        printk("BD: ");
        printk(" %08x %08x",
               le32_to_cpu(fd->bd.BuffData),
               le32_to_cpu(fd->bd.BDCtl));
        printk("\n");
}

static int
dump_rxfd(struct RxFD *fd)
{
        int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
        if (bd_count > 8)
                bd_count = 8;
        printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
               le32_to_cpu(fd->fd.FDNext),
               le32_to_cpu(fd->fd.FDSystem),
               le32_to_cpu(fd->fd.FDStat),
               le32_to_cpu(fd->fd.FDCtl));
        if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
                return 0;
        printk("BD: ");
        for (i = 0; i < bd_count; i++)
                printk(" %08x %08x",
                       le32_to_cpu(fd->bd[i].BuffData),
                       le32_to_cpu(fd->bd[i].BDCtl));
        printk("\n");
        return bd_count;
}

#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER)
static void
dump_frfd(struct FrFD *fd)
{
        int i;
        printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
               le32_to_cpu(fd->fd.FDNext),
               le32_to_cpu(fd->fd.FDSystem),
               le32_to_cpu(fd->fd.FDStat),
               le32_to_cpu(fd->fd.FDCtl));
        printk("BD: ");
        for (i = 0; i < RX_BUF_NUM; i++)
                printk(" %08x %08x",
                       le32_to_cpu(fd->bd[i].BuffData),
                       le32_to_cpu(fd->bd[i].BDCtl));
        printk("\n");
}
#endif

#ifdef DEBUG
static void
panic_queues(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        int i;

        printk("TxFD base %p, start %u, end %u\n",
               lp->tfd_base, lp->tfd_start, lp->tfd_end);
        printk("RxFD base %p limit %p cur %p\n",
               lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
        printk("FrFD %p\n", lp->fbl_ptr);
        for (i = 0; i < TX_FD_NUM; i++)
                dump_txfd(&lp->tfd_base[i]);
        for (i = 0; i < RX_FD_NUM; i++) {
                int bd_count = dump_rxfd(&lp->rfd_base[i]);
                i += (bd_count + 1) / 2;        /* skip BDs */
        }
        dump_frfd(lp->fbl_ptr);
        panic("%s: Illegal queue state.", dev->name);
}
#endif

static void print_eth(const u8 *add)
{
        printk(KERN_DEBUG "print_eth(%p)\n", add);
        printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
                add + 6, add, add[12], add[13]);
}

static int tc35815_tx_full(struct net_device *dev)
{
        struct tc35815_local *lp = netdev_priv(dev);
        return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end);
}
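
/*
 * Example (illustrative): with TX_FD_NUM == 128, tfd_start == 127 and
 * tfd_end == 0 gives (127 + 1) % 128 == 0 == tfd_end, so the ring
 * reports full with one slot still unused -- the classic way to
 * distinguish a full ring from an empty one.
 */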
1311
1312static void tc35815_restart(struct net_device *dev)
1313{
1314        struct tc35815_local *lp = netdev_priv(dev);
1315
1316        if (lp->phy_dev) {
1317                int timeout;
1318
1319                phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
1320                timeout = 100;
1321                while (--timeout) {
1322                        if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
1323                                break;
1324                        udelay(1);
1325                }
1326                if (!timeout)
1327                        printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
1328        }
1329
1330        spin_lock_irq(&lp->lock);
1331        tc35815_chip_reset(dev);
1332        tc35815_clear_queues(dev);
1333        tc35815_chip_init(dev);
1334        /* Reconfigure the CAM again, since tc35815_chip_init() reinitializes it. */
1335        tc35815_set_multicast_list(dev);
1336        spin_unlock_irq(&lp->lock);
1337
1338        netif_wake_queue(dev);
1339}
1340
1341static void tc35815_restart_work(struct work_struct *work)
1342{
1343        struct tc35815_local *lp =
1344                container_of(work, struct tc35815_local, restart_work);
1345        struct net_device *dev = lp->dev;
1346
1347        tc35815_restart(dev);
1348}
1349
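    /*
     * tc35815_restart() polls the PHY and takes lp->lock with
     * spin_lock_irq(), so it must not run in interrupt context.  The Tx
     * timeout and fatal-error paths therefore mask the chip's interrupt
     * sources here and defer the actual reset to restart_work.
     */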
1350static void tc35815_schedule_restart(struct net_device *dev)
1351{
1352        struct tc35815_local *lp = netdev_priv(dev);
1353        struct tc35815_regs __iomem *tr =
1354                (struct tc35815_regs __iomem *)dev->base_addr;
1355
1356        /* disable interrupts */
1357        tc_writel(0, &tr->Int_En);
1358        tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
1359        schedule_work(&lp->restart_work);
1360}
1361
1362static void tc35815_tx_timeout(struct net_device *dev)
1363{
1364        struct tc35815_regs __iomem *tr =
1365                (struct tc35815_regs __iomem *)dev->base_addr;
1366
1367        printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
1368               dev->name, tc_readl(&tr->Tx_Stat));
1369
1370        /* Try to restart the adaptor. */
1371        tc35815_schedule_restart(dev);
1372        dev->stats.tx_errors++;
1373}
1374
1375/*
1376 * Open/initialize the controller. This is called (in the current kernel)
1377 * sometime after booting when the 'ifconfig' program is run.
1378 *
1379 * This routine should set everything up anew at each open, even
1380 * registers that "should" only need to be set once at boot, so that
1381 * there is a non-reboot way to recover if something goes wrong.
1382 */
1383static int
1384tc35815_open(struct net_device *dev)
1385{
1386        struct tc35815_local *lp = netdev_priv(dev);
1387
1388        /*
1389         * This is used if the interrupt line can be turned off (shared).
1390         * See 3c503.c for an example of selecting the IRQ at config-time.
1391         */
1392        if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED,
1393                        dev->name, dev))
1394                return -EAGAIN;
1395
1396        tc35815_chip_reset(dev);
1397
1398        if (tc35815_init_queues(dev) != 0) {
1399                free_irq(dev->irq, dev);
1400                return -EAGAIN;
1401        }
1402
1403#ifdef TC35815_NAPI
1404        napi_enable(&lp->napi);
1405#endif
1406
1407        /* Initialize the chip; this also loads the station address into the CAM. */
1408        spin_lock_irq(&lp->lock);
1409        tc35815_chip_init(dev);
1410        spin_unlock_irq(&lp->lock);
1411
1412        netif_carrier_off(dev);
1413        /* schedule a link state check */
1414        phy_start(lp->phy_dev);
1415
1416        /* We are now ready to accept transmit requests from
1417         * the networking queueing layer.
1418         */
1419        netif_start_queue(dev);
1420
1421        return 0;
1422}
1423
1424/* This will only be invoked if the driver is _not_ in the XOFF
1425 * state, so there is no need to check for it; the invariant holds
1426 * as long as the netif_*_queue() calls are made at the proper
1427 * times.
1428 */
1429static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1430{
1431        struct tc35815_local *lp = netdev_priv(dev);
1432        struct TxFD *txfd;
1433        unsigned long flags;
1434
1435        /* If some error occurs while trying to transmit this
1436         * packet, you should return NETDEV_TX_BUSY from this function.
1437         * In such a case you _may not_ do anything to the
1438         * SKB, it is still owned by the network queueing
1439         * layer when an error is returned.  This means you
1440         * may not modify any SKB fields, you may not free
1441         * the SKB, etc.
1442         */
1443
1444        /* This is the most common case for modern hardware.
1445         * The spinlock protects this code from the TX complete
1446         * hardware interrupt handler.  Queue flow control is
1447         * thus managed under this lock as well.
1448         */
1449        spin_lock_irqsave(&lp->lock, flags);
1450
1451        /* failsafe: reclaim completed Tx FDs now if more than half are in use */
1452        if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
1453            TX_FD_NUM / 2)
1454                tc35815_txdone(dev);
1455
1456        if (netif_msg_pktdata(lp))
1457                print_eth(skb->data);
1458#ifdef DEBUG
1459        if (lp->tx_skbs[lp->tfd_start].skb) {
1460                printk("%s: tx_skbs conflict.\n", dev->name);
1461                panic_queues(dev);
1462        }
1463#else
1464        BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
1465#endif
1466        lp->tx_skbs[lp->tfd_start].skb = skb;
1467        lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1468
1469        /* add to ring */
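            /*
             * FDSystem records the tx_skbs index so that tc35815_txdone()
             * can find the matching skb later; FD_CownsFD hands the
             * descriptor over to the controller, and BDCnt is 1 since the
             * whole frame sits in a single buffer descriptor.
             */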
1470        txfd = &lp->tfd_base[lp->tfd_start];
1471        txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
1472        txfd->bd.BDCtl = cpu_to_le32(skb->len);
1473        txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
1474        txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));
1475
1476        if (lp->tfd_start == lp->tfd_end) {
1477                struct tc35815_regs __iomem *tr =
1478                        (struct tc35815_regs __iomem *)dev->base_addr;
1479                /* Start DMA Transmitter. */
1480                txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1481#ifdef GATHER_TXINT
1482                txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1483#endif
1484                if (netif_msg_tx_queued(lp)) {
1485                        printk("%s: starting TxFD.\n", dev->name);
1486                        dump_txfd(txfd);
1487                }
1488                tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1489        } else {
1490                txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
1491                if (netif_msg_tx_queued(lp)) {
1492                        printk("%s: queueing TxFD.\n", dev->name);
1493                        dump_txfd(txfd);
1494                }
1495        }
1496        lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
1497
1498        dev->trans_start = jiffies;
1499
1500        /* If we just used up the very last entry in the
1501         * TX ring on this device, tell the queueing
1502         * layer to send no more.
1503         */
1504        if (tc35815_tx_full(dev)) {
1505                if (netif_msg_tx_queued(lp))
1506                        printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
1507                netif_stop_queue(dev);
1508        }
1509
1510        /* Transmit statistics are updated when the TX completion
1511         * hardware interrupt arrives.
1512         */
1513
1514        spin_unlock_irqrestore(&lp->lock, flags);
1515        return NETDEV_TX_OK;
1516}
1517
1518#define FATAL_ERROR_INT \
1519        (Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
1520static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1521{
1522        static int count;
1523        printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
1524               dev->name, status);
1525        if (status & Int_IntPCI)
1526                printk(" IntPCI");
1527        if (status & Int_DmParErr)
1528                printk(" DmParErr");
1529        if (status & Int_IntNRAbt)
1530                printk(" IntNRAbt");
1531        printk("\n");
1532        if (count++ > 100)
1533                panic("%s: Too many fatal errors.", dev->name);
1534        printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
1535        /* Try to restart the adaptor. */
1536        tc35815_schedule_restart(dev);
1537}
1538
1539#ifdef TC35815_NAPI
1540static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1541#else
1542static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1543#endif
1544{
1545        struct tc35815_local *lp = netdev_priv(dev);
1546        int ret = -1;
1547
1548        /* Fatal errors... */
1549        if (status & FATAL_ERROR_INT) {
1550                tc35815_fatal_error_interrupt(dev, status);
1551                return 0;
1552        }
1553        /* recoverable errors */
1554        if (status & Int_IntFDAEx) {
1555                if (netif_msg_rx_err(lp))
1556                        dev_warn(&dev->dev,
1557                                 "Free Descriptor Area Exhausted (%#x).\n",
1558                                 status);
1559                dev->stats.rx_dropped++;
1560                ret = 0;
1561        }
1562        if (status & Int_IntBLEx) {
1563                if (netif_msg_rx_err(lp))
1564                        dev_warn(&dev->dev,
1565                                 "Buffer List Exhausted (%#x).\n",
1566                                 status);
1567                dev->stats.rx_dropped++;
1568                ret = 0;
1569        }
1570        if (status & Int_IntExBD) {
1571                if (netif_msg_rx_err(lp))
1572                        dev_warn(&dev->dev,
1573                                 "Excessive Buffer Descriptors (%#x).\n",
1574                                 status);
1575                dev->stats.rx_length_errors++;
1576                ret = 0;
1577        }
1578
1579        /* normal notification */
1580        if (status & Int_IntMacRx) {
1581                /* Got a packet(s). */
1582#ifdef TC35815_NAPI
1583                ret = tc35815_rx(dev, limit);
1584#else
1585                tc35815_rx(dev);
1586                ret = 0;
1587#endif
1588                lp->lstats.rx_ints++;
1589        }
1590        if (status & Int_IntMacTx) {
1591                /* Transmit complete. */
1592                lp->lstats.tx_ints++;
1593                tc35815_txdone(dev);
1594                netif_wake_queue(dev);
1595                ret = 0;
1596        }
1597        return ret;
1598}
1599
1600/*
1601 * The typical workload of the driver:
1602 * Handle the network interface interrupts.
1603 */
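    /*
     * With TC35815_NAPI the handler only masks further interrupts via
     * DMA_IntMask and schedules the NAPI poll; all Rx/Tx processing is
     * deferred to tc35815_poll().  Without NAPI everything is handled
     * here directly, under lp->lock.
     */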
1604static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1605{
1606        struct net_device *dev = dev_id;
1607        struct tc35815_local *lp = netdev_priv(dev);
1608        struct tc35815_regs __iomem *tr =
1609                (struct tc35815_regs __iomem *)dev->base_addr;
1610#ifdef TC35815_NAPI
1611        u32 dmactl = tc_readl(&tr->DMA_Ctl);
1612
1613        if (!(dmactl & DMA_IntMask)) {
1614                /* disable interrupts */
1615                tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
1616                if (napi_schedule_prep(&lp->napi))
1617                        __napi_schedule(&lp->napi);
1618                else {
1619                        printk(KERN_ERR "%s: interrupt taken in poll\n",
1620                               dev->name);
1621                        BUG();
1622                }
1623                (void)tc_readl(&tr->Int_Src);   /* flush */
1624                return IRQ_HANDLED;
1625        }
1626        return IRQ_NONE;
1627#else
1628        int handled;
1629        u32 status;
1630
1631        spin_lock(&lp->lock);
1632        status = tc_readl(&tr->Int_Src);
1633        /* BLEx, FDAEx will be cleared later */
1634        tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1635                  &tr->Int_Src);        /* write to clear */
1636        handled = tc35815_do_interrupt(dev, status);
1637        if (status & (Int_BLEx | Int_FDAEx))
1638                tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src);
1639        (void)tc_readl(&tr->Int_Src);   /* flush */
1640        spin_unlock(&lp->lock);
1641        return IRQ_RETVAL(handled >= 0);
1642#endif /* TC35815_NAPI */
1643}
1644
1645#ifdef CONFIG_NET_POLL_CONTROLLER
1646static void tc35815_poll_controller(struct net_device *dev)
1647{
1648        disable_irq(dev->irq);
1649        tc35815_interrupt(dev->irq, dev);
1650        enable_irq(dev->irq);
1651}
1652#endif
1653
1654/* We have a good packet(s), get it/them out of the buffers. */
1655#ifdef TC35815_NAPI
1656static int
1657tc35815_rx(struct net_device *dev, int limit)
1658#else
1659static void
1660tc35815_rx(struct net_device *dev)
1661#endif
1662{
1663        struct tc35815_local *lp = netdev_priv(dev);
1664        unsigned int fdctl;
1665        int i;
1666#ifdef TC35815_NAPI
1667        int received = 0;
1668#endif
1669
1670        while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1671                int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
1672                int pkt_len = fdctl & FD_FDLength_MASK;
1673                int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1674#ifdef DEBUG
1675                struct RxFD *next_rfd;
1676#endif
1677#if (RX_CTL_CMD & Rx_StripCRC) == 0
1678                pkt_len -= ETH_FCS_LEN;
1679#endif
1680
1681                if (netif_msg_rx_status(lp))
1682                        dump_rxfd(lp->rfd_cur);
1683                if (status & Rx_Good) {
1684                        struct sk_buff *skb;
1685                        unsigned char *data;
1686                        int cur_bd;
1687#ifdef TC35815_USE_PACKEDBUFFER
1688                        int offset;
1689#endif
1690
1691#ifdef TC35815_NAPI
1692                        if (--limit < 0)
1693                                break;
1694#endif
1695#ifdef TC35815_USE_PACKEDBUFFER
1696                        BUG_ON(bd_count > 2);
1697                        skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1698                        if (skb == NULL) {
1699                                printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1700                                       dev->name);
1701                                dev->stats.rx_dropped++;
1702                                break;
1703                        }
1704                        skb_reserve(skb, NET_IP_ALIGN);
1705
1706                        data = skb_put(skb, pkt_len);
1707
1708                        /* copy from receive buffer */
1709                        cur_bd = 0;
1710                        offset = 0;
1711                        while (offset < pkt_len && cur_bd < bd_count) {
1712                                int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
1713                                        BD_BuffLength_MASK;
1714                                dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
1715                                void *rxbuf = rxbuf_bus_to_virt(lp, dma);
1716                                if (offset + len > pkt_len)
1717                                        len = pkt_len - offset;
1718#ifdef TC35815_DMA_SYNC_ONDEMAND
1719                                pci_dma_sync_single_for_cpu(lp->pci_dev,
1720                                                            dma, len,
1721                                                            PCI_DMA_FROMDEVICE);
1722#endif
1723                                memcpy(data + offset, rxbuf, len);
1724#ifdef TC35815_DMA_SYNC_ONDEMAND
1725                                pci_dma_sync_single_for_device(lp->pci_dev,
1726                                                               dma, len,
1727                                                               PCI_DMA_FROMDEVICE);
1728#endif
1729                                offset += len;
1730                                cur_bd++;
1731                        }
1732#else /* TC35815_USE_PACKEDBUFFER */
1733                        BUG_ON(bd_count > 1);
1734                        cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1735                                  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1736#ifdef DEBUG
1737                        if (cur_bd >= RX_BUF_NUM) {
1738                                printk("%s: invalid BDID.\n", dev->name);
1739                                panic_queues(dev);
1740                        }
1741                        BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
1742                               (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
1743                        if (!lp->rx_skbs[cur_bd].skb) {
1744                                printk("%s: NULL skb.\n", dev->name);
1745                                panic_queues(dev);
1746                        }
1747#else
1748                        BUG_ON(cur_bd >= RX_BUF_NUM);
1749#endif
1750                        skb = lp->rx_skbs[cur_bd].skb;
1751                        prefetch(skb->data);
1752                        lp->rx_skbs[cur_bd].skb = NULL;
1753                        pci_unmap_single(lp->pci_dev,
1754                                         lp->rx_skbs[cur_bd].skb_dma,
1755                                         RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1756                        if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
1757                                memmove(skb->data, skb->data - NET_IP_ALIGN,
1758                                        pkt_len);
1759                        data = skb_put(skb, pkt_len);
1760#endif /* TC35815_USE_PACKEDBUFFER */
1761                        if (netif_msg_pktdata(lp))
1762                                print_eth(data);
1763                        skb->protocol = eth_type_trans(skb, dev);
1764#ifdef TC35815_NAPI
1765                        netif_receive_skb(skb);
1766                        received++;
1767#else
1768                        netif_rx(skb);
1769#endif
1770                        dev->stats.rx_packets++;
1771                        dev->stats.rx_bytes += pkt_len;
1772                } else {
1773                        dev->stats.rx_errors++;
1774                        if (netif_msg_rx_err(lp))
1775                                dev_info(&dev->dev, "Rx error (status %x)\n",
1776                                         status & Rx_Stat_Mask);
1777                        /* WORKAROUND: LongErr and CRCErr mean Overflow. */
1778                        if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
1779                                status &= ~(Rx_LongErr|Rx_CRCErr);
1780                                status |= Rx_Over;
1781                        }
1782                        if (status & Rx_LongErr)
1783                                dev->stats.rx_length_errors++;
1784                        if (status & Rx_Over)
1785                                dev->stats.rx_fifo_errors++;
1786                        if (status & Rx_CRCErr)
1787                                dev->stats.rx_crc_errors++;
1788                        if (status & Rx_Align)
1789                                dev->stats.rx_frame_errors++;
1790                }
1791
1792                if (bd_count > 0) {
1793                        /* put Free Buffer back to controller */
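                            /*
                             * In packed-buffer mode the free-buffer list is
                             * refilled from fbl_curid up to the BD id the chip
                             * just handed back; otherwise fbl_count tracks how
                             * many buffers the controller owns and the list is
                             * topped up to RX_BUF_NUM, allocating fresh skbs
                             * where needed.
                             */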
1794                        int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
1795                        unsigned char id =
1796                                (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1797#ifdef DEBUG
1798                        if (id >= RX_BUF_NUM) {
1799                                printk("%s: invalid BDID.\n", dev->name);
1800                                panic_queues(dev);
1801                        }
1802#else
1803                        BUG_ON(id >= RX_BUF_NUM);
1804#endif
1805                        /* free old buffers */
1806#ifdef TC35815_USE_PACKEDBUFFER
1807                        while (lp->fbl_curid != id)
1808#else
1809                        lp->fbl_count--;
1810                        while (lp->fbl_count < RX_BUF_NUM)
1811#endif
1812                        {
1813#ifdef TC35815_USE_PACKEDBUFFER
1814                                unsigned char curid = lp->fbl_curid;
1815#else
1816                                unsigned char curid =
1817                                        (id + 1 + lp->fbl_count) % RX_BUF_NUM;
1818#endif
1819                                struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1820#ifdef DEBUG
1821                                bdctl = le32_to_cpu(bd->BDCtl);
1822                                if (bdctl & BD_CownsBD) {
1823                                        printk("%s: Freeing invalid BD.\n",
1824                                               dev->name);
1825                                        panic_queues(dev);
1826                                }
1827#endif
1828                                /* pass BD to controller */
1829#ifndef TC35815_USE_PACKEDBUFFER
1830                                if (!lp->rx_skbs[curid].skb) {
1831                                        lp->rx_skbs[curid].skb =
1832                                                alloc_rxbuf_skb(dev,
1833                                                                lp->pci_dev,
1834                                                                &lp->rx_skbs[curid].skb_dma);
1835                                        if (!lp->rx_skbs[curid].skb)
1836                                                break; /* try on next reception */
1837                                        bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1838                                }
1839#endif /* TC35815_USE_PACKEDBUFFER */
1840                                /* Note: BDLength was modified by the chip. */
1841                                bd->BDCtl = cpu_to_le32(BD_CownsBD |
1842                                                        (curid << BD_RxBDID_SHIFT) |
1843                                                        RX_BUF_SIZE);
1844#ifdef TC35815_USE_PACKEDBUFFER
1845                                lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
1846                                if (netif_msg_rx_status(lp)) {
1847                                        printk("%s: Entering new FBD %d\n",
1848                                               dev->name, lp->fbl_curid);
1849                                        dump_frfd(lp->fbl_ptr);
1850                                }
1851#else
1852                                lp->fbl_count++;
1853#endif
1854                        }
1855                }
1856
1857                /* put RxFD back to controller */
1858#ifdef DEBUG
1859                next_rfd = fd_bus_to_virt(lp,
1860                                          le32_to_cpu(lp->rfd_cur->fd.FDNext));
1861                if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
1862                        printk("%s: RxFD FDNext invalid.\n", dev->name);
1863                        panic_queues(dev);
1864                }
1865#endif
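                    /*
                     * An RxFD is followed inline by its buffer descriptors;
                     * two BDs occupy one FD-sized slot, so hand back
                     * 1 + (bd_count + 1) / 2 slots to the controller.
                     */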
1866                for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
1867                        /* pass FD to controller */
1868#ifdef DEBUG
1869                        lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
1870#else
1871                        lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
1872#endif
1873                        lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
1874                        lp->rfd_cur++;
1875                }
1876                if (lp->rfd_cur > lp->rfd_limit)
1877                        lp->rfd_cur = lp->rfd_base;
1878#ifdef DEBUG
1879                if (lp->rfd_cur != next_rfd)
1880                        printk("rfd_cur = %p, next_rfd %p\n",
1881                               lp->rfd_cur, next_rfd);
1882#endif
1883        }
1884
1885#ifdef TC35815_NAPI
1886        return received;
1887#endif
1888}
1889
1890#ifdef TC35815_NAPI
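    /*
     * NAPI poll routine: keep draining Int_Src and processing events
     * until no interrupt status remains or the budget is spent.  Only
     * when the budget was not exhausted is the poll completed and the
     * interrupt mask lifted again.
     */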
1891static int tc35815_poll(struct napi_struct *napi, int budget)
1892{
1893        struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
1894        struct net_device *dev = lp->dev;
1895        struct tc35815_regs __iomem *tr =
1896                (struct tc35815_regs __iomem *)dev->base_addr;
1897        int received = 0, handled;
1898        u32 status;
1899
1900        spin_lock(&lp->lock);
1901        status = tc_readl(&tr->Int_Src);
1902        do {
1903                /* BLEx, FDAEx will be cleared later */
1904                tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1905                          &tr->Int_Src);        /* write to clear */
1906
1907                handled = tc35815_do_interrupt(dev, status, budget - received);
1908                if (status & (Int_BLEx | Int_FDAEx))
1909                        tc_writel(status & (Int_BLEx | Int_FDAEx),
1910                                  &tr->Int_Src);
1911                if (handled >= 0) {
1912                        received += handled;
1913                        if (received >= budget)
1914                                break;
1915                }
1916                status = tc_readl(&tr->Int_Src);
1917        } while (status);
1918        spin_unlock(&lp->lock);
1919
1920        if (received < budget) {
1921                napi_complete(napi);
1922                /* enable interrupts */
1923                tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1924        }
1925        return received;
1926}
1927#endif
1928
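    /*
     * Mask of Tx status bits that count as errors; Tx_NCarr (lost
     * carrier) is excluded when carrier checking is compiled out.
     */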
1929#ifdef NO_CHECK_CARRIER
1930#define TX_STA_ERR      (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1931#else
1932#define TX_STA_ERR      (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1933#endif
1934
1935static void
1936tc35815_check_tx_stat(struct net_device *dev, int status)
1937{
1938        struct tc35815_local *lp = netdev_priv(dev);
1939        const char *msg = NULL;
1940
1941        /* count collisions */
1942        if (status & Tx_ExColl)
1943                dev->stats.collisions += 16;
1944        if (status & Tx_TxColl_MASK)
1945                dev->stats.collisions += status & Tx_TxColl_MASK;
1946
1947#ifndef NO_CHECK_CARRIER
1948        /* TX4939 does not have NCarr */
1949        if (lp->chiptype == TC35815_TX4939)
1950                status &= ~Tx_NCarr;
1951#ifdef WORKAROUND_LOSTCAR
1952        /* WORKAROUND: ignore LostCrS in full duplex operation */
1953        if (!lp->link || lp->duplex == DUPLEX_FULL)
1954                status &= ~Tx_NCarr;
1955#endif
1956#endif
1957
1958        if (!(status & TX_STA_ERR)) {
1959                /* no error. */
1960                dev->stats.tx_packets++;
1961                return;
1962        }
1963
1964        dev->stats.tx_errors++;
1965        if (status & Tx_ExColl) {
1966                dev->stats.tx_aborted_errors++;
1967                msg = "Excessive Collision.";
1968        }
1969        if (status & Tx_Under) {
1970                dev->stats.tx_fifo_errors++;
1971                msg = "Tx FIFO Underrun.";
1972                if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
1973                        lp->lstats.tx_underrun++;
1974                        if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
1975                                struct tc35815_regs __iomem *tr =
1976                                        (struct tc35815_regs __iomem *)dev->base_addr;
1977                                tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
1978                                msg = "Tx FIFO Underrun. Changed Tx threshold to max.";
1979                        }
1980                }
1981        }
1982        if (status & Tx_Defer) {
1983                dev->stats.tx_fifo_errors++;
1984                msg = "Excessive Deferral.";
1985        }
1986#ifndef NO_CHECK_CARRIER
1987        if (status & Tx_NCarr) {
1988                dev->stats.tx_carrier_errors++;
1989                msg = "Lost Carrier Sense.";
1990        }
1991#endif
1992        if (status & Tx_LateColl) {
1993                dev->stats.tx_aborted_errors++;
1994                msg = "Late Collision.";
1995        }
1996        if (status & Tx_TxPar) {
1997                dev->stats.tx_fifo_errors++;
1998                msg = "Transmit Parity Error.";
1999        }
2000        if (status & Tx_SQErr) {
2001                dev->stats.tx_heartbeat_errors++;
2002                msg = "Signal Quality Error.";
2003        }
2004        if (msg && netif_msg_tx_err(lp))
2005                printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
2006}
2007
2008/* This handles TX complete events posted by the device
2009 * via interrupts.
2010 */
2011static void
2012tc35815_txdone(struct net_device *dev)
2013{
2014        struct tc35815_local *lp = netdev_priv(dev);
2015        struct TxFD *txfd;
2016        unsigned int fdctl;
2017
2018        txfd = &lp->tfd_base[lp->tfd_end];
2019        while (lp->tfd_start != lp->tfd_end &&
2020               !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
2021                int status = le32_to_cpu(txfd->fd.FDStat);
2022                struct sk_buff *skb;
2023                unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
2024                u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);
2025
2026                if (netif_msg_tx_done(lp)) {
2027                        printk("%s: complete TxFD.\n", dev->name);
2028                        dump_txfd(txfd);
2029                }
2030                tc35815_check_tx_stat(dev, status);
2031
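                    /*
                     * FDSystem holds the tx_skbs index recorded at queueing
                     * time and is reset to 0xffffffff once the FD has been
                     * completed, mapping the FD back to its skb.
                     */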
2032                skb = fdsystem != 0xffffffff ?
2033                        lp->tx_skbs[fdsystem].skb : NULL;
2034#ifdef DEBUG
2035                if (lp->tx_skbs[lp->tfd_end].skb != skb) {
2036                        printk("%s: tx_skbs mismatch.\n", dev->name);
2037                        panic_queues(dev);
2038                }
2039#else
2040                BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
2041#endif
2042                if (skb) {
2043                        dev->stats.tx_bytes += skb->len;
2044                        pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
2045                        lp->tx_skbs[lp->tfd_end].skb = NULL;
2046                        lp->tx_skbs[lp->tfd_end].skb_dma = 0;
2047#ifdef TC35815_NAPI
2048                        dev_kfree_skb_any(skb);
2049#else
2050                        dev_kfree_skb_irq(skb);
2051#endif
2052                }
2053                txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
2054
2055                lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
2056                txfd = &lp->tfd_base[lp->tfd_end];
2057#ifdef DEBUG
2058                if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
2059                        printk("%s: TxFD FDNext invalid.\n", dev->name);
2060                        panic_queues(dev);
2061                }
2062#endif
2063                if (fdnext & FD_Next_EOL) {
2064                        /* The DMA transmitter has stopped; restart it if FDs remain queued. */
2065                        if (lp->tfd_end != lp->tfd_start) {
2066                                struct tc35815_regs __iomem *tr =
2067                                        (struct tc35815_regs __iomem *)dev->base_addr;
2068                                int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
2069                                struct TxFD *txhead = &lp->tfd_base[head];
2070                                int qlen = (lp->tfd_start + TX_FD_NUM
2071                                            - lp->tfd_end) % TX_FD_NUM;
2072
2073#ifdef DEBUG
2074                                if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
2075                                        printk("%s: TxFD FDCtl invalid.\n", dev->name);
2076                                        panic_queues(dev);
2077                                }
2078#endif
2079                                /* log max queue length */
2080                                if (lp->lstats.max_tx_qlen < qlen)
2081                                        lp->lstats.max_tx_qlen = qlen;
2082
2083
2084                                /* start DMA Transmitter again */
2085                                txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
2086#ifdef GATHER_TXINT
2087                                txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
2088#endif
2089                                if (netif_msg_tx_queued(lp)) {
2090                                        printk("%s: start TxFD on queue.\n",
2091                                               dev->name);
2092                                        dump_txfd(txfd);
2093                                }
2094                                tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
2095                        }
2096                        break;
2097                }
2098        }
2099
2100        /* If we had stopped the queue due to a "tx full"
2101         * condition, and space has now been made available,
2102         * wake up the queue.
2103         */
2104        if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
2105                netif_wake_queue(dev);
2106}
2107
2108/* The inverse routine to tc35815_open(). */
2109static int
2110tc35815_close(struct net_device *dev)
2111{
2112        struct tc35815_local *lp = netdev_priv(dev);
2113
2114        netif_stop_queue(dev);
2115#ifdef TC35815_NAPI
2116        napi_disable(&lp->napi);
2117#endif
2118        if (lp->phy_dev)
2119                phy_stop(lp->phy_dev);
2120        cancel_work_sync(&lp->restart_work);
2121
2122        /* Flush the Tx and disable Rx here. */
2123        tc35815_chip_reset(dev);
2124        free_irq(dev->irq, dev);
2125
2126        tc35815_free_queues(dev);
2127
2128        return 0;
2129
2130}
2131
2132/*
2133 * Get the current statistics.
2134 * This may be called with the card open or closed.
2135 */
2136static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
2137{
2138        struct tc35815_regs __iomem *tr =
2139                (struct tc35815_regs __iomem *)dev->base_addr;
2140        if (netif_running(dev))
2141                /* Update the statistics from the device registers. */
2142                dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);
2143
2144        return &dev->stats;
2145}
2146
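    /*
     * CAM entries are six bytes wide and therefore straddle 32-bit
     * words.  For a pair of entries e0 and e1:
     *   word 0: e0[0..3]
     *   word 1: e0[4..5] e1[0..1]
     *   word 2: e1[2..5]
     * Every entry fully owns one word and shares another with its
     * neighbour; the shared word is updated read-modify-write below.
     */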
2147static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
2148{
2149        struct tc35815_local *lp = netdev_priv(dev);
2150        struct tc35815_regs __iomem *tr =
2151                (struct tc35815_regs __iomem *)dev->base_addr;
2152        int cam_index = index * 6;
2153        u32 cam_data;
2154        u32 saved_addr;
2155
2156        saved_addr = tc_readl(&tr->CAM_Adr);
2157
2158        if (netif_msg_hw(lp))
2159                printk(KERN_DEBUG "%s: CAM %d: %pM\n",
2160                        dev->name, index, addr);
2161        if (index & 1) {
2162                /* read modify write */
2163                tc_writel(cam_index - 2, &tr->CAM_Adr);
2164                cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
2165                cam_data |= addr[0] << 8 | addr[1];
2166                tc_writel(cam_data, &tr->CAM_Data);
2167                /* write whole word */
2168                tc_writel(cam_index + 2, &tr->CAM_Adr);
2169                cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
2170                tc_writel(cam_data, &tr->CAM_Data);
2171        } else {
2172                /* write whole word */
2173                tc_writel(cam_index, &tr->CAM_Adr);
2174                cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
2175                tc_writel(cam_data, &tr->CAM_Data);
2176                /* read modify write */
2177                tc_writel(cam_index + 4, &tr->CAM_Adr);
2178                cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
2179                cam_data |= addr[4] << 24 | (addr[5] << 16);
2180                tc_writel(cam_data, &tr->CAM_Data);
2181        }
2182
2183        tc_writel(saved_addr, &tr->CAM_Adr);
2184}
2185
2186
2187/*
2188 * Set or clear the multicast filter for this adaptor.
2189 * IFF_PROMISC          Promiscuous mode, receive all packets
2190 * IFF_ALLMULTI         Receive all multicast packets
2191 * mc_count > 0         Multicast mode, receive normal and MC packets,
2192 *                      and do best-effort filtering.
2193 */
2194static void
2195tc35815_set_multicast_list(struct net_device *dev)
2196{
2197        struct tc35815_regs __iomem *tr =
2198                (struct tc35815_regs __iomem *)dev->base_addr;
2199
2200        if (dev->flags & IFF_PROMISC) {
2201#ifdef WORKAROUND_100HALF_PROMISC
2202                /* With some (all?) 100M-half hubs, the controller hangs
2203                 * if promiscuous mode is enabled before link-up... */
2204                struct tc35815_local *lp = netdev_priv(dev);
2205
2206                if (!lp->link)
2207                        return;
2208#endif
2209                /* Enable promiscuous mode */
2210                tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
2211        } else if ((dev->flags & IFF_ALLMULTI) ||
2212                  dev->mc_count > CAM_ENTRY_MAX - 3) {
2213                /* CAM 0, 1, 20 are reserved. */
2214                /* Disable promiscuous mode, use normal mode. */
2215                tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
2216        } else if (dev->mc_count) {
2217                struct dev_mc_list *cur_addr = dev->mc_list;
2218                int i;
2219                int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
2220
2221                tc_writel(0, &tr->CAM_Ctl);
2222                /* Walk the address list, and load the filter */
2223                for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
2224                        if (!cur_addr)
2225                                break;
2226                        /* entries 0 and 1 are reserved. */
2227                        tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
2228                        ena_bits |= CAM_Ena_Bit(i + 2);
2229                }
2230                tc_writel(ena_bits, &tr->CAM_Ena);
2231                tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2232        } else {
2233                tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
2234                tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2235        }
2236}
2237
2238static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2239{
2240        struct tc35815_local *lp = netdev_priv(dev);
2241        strcpy(info->driver, MODNAME);
2242        strcpy(info->version, DRV_VERSION);
2243        strcpy(info->bus_info, pci_name(lp->pci_dev));
2244}
2245
2246static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2247{
2248        struct tc35815_local *lp = netdev_priv(dev);
2249
2250        if (!lp->phy_dev)
2251                return -ENODEV;
2252        return phy_ethtool_gset(lp->phy_dev, cmd);
2253}
2254
2255static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2256{
2257        struct tc35815_local *lp = netdev_priv(dev);
2258
2259        if (!lp->phy_dev)
2260                return -ENODEV;
2261        return phy_ethtool_sset(lp->phy_dev, cmd);
2262}
2263
2264static u32 tc35815_get_msglevel(struct net_device *dev)
2265{
2266        struct tc35815_local *lp = netdev_priv(dev);
2267        return lp->msg_enable;
2268}
2269
2270static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
2271{
2272        struct tc35815_local *lp = netdev_priv(dev);
2273        lp->msg_enable = datum;
2274}
2275
2276static int tc35815_get_sset_count(struct net_device *dev, int sset)
2277{
2278        struct tc35815_local *lp = netdev_priv(dev);
2279
2280        switch (sset) {
2281        case ETH_SS_STATS:
2282                return sizeof(lp->lstats) / sizeof(int);
2283        default:
2284                return -EOPNOTSUPP;
2285        }
2286}
2287
2288static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
2289{
2290        struct tc35815_local *lp = netdev_priv(dev);
2291        data[0] = lp->lstats.max_tx_qlen;
2292        data[1] = lp->lstats.tx_ints;
2293        data[2] = lp->lstats.rx_ints;
2294        data[3] = lp->lstats.tx_underrun;
2295}
2296
2297static struct {
2298        const char str[ETH_GSTRING_LEN];
2299} ethtool_stats_keys[] = {
2300        { "max_tx_qlen" },
2301        { "tx_ints" },
2302        { "rx_ints" },
2303        { "tx_underrun" },
2304};
2305
2306static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2307{
2308        memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
2309}
2310
2311static const struct ethtool_ops tc35815_ethtool_ops = {
2312        .get_drvinfo            = tc35815_get_drvinfo,
2313        .get_settings           = tc35815_get_settings,
2314        .set_settings           = tc35815_set_settings,
2315        .get_link               = ethtool_op_get_link,
2316        .get_msglevel           = tc35815_get_msglevel,
2317        .set_msglevel           = tc35815_set_msglevel,
2318        .get_strings            = tc35815_get_strings,
2319        .get_sset_count         = tc35815_get_sset_count,
2320        .get_ethtool_stats      = tc35815_get_ethtool_stats,
2321};
2322
2323static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2324{
2325        struct tc35815_local *lp = netdev_priv(dev);
2326
2327        if (!netif_running(dev))
2328                return -EINVAL;
2329        if (!lp->phy_dev)
2330                return -ENODEV;
2331        return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd);
2332}
2333
2334static void tc35815_chip_reset(struct net_device *dev)
2335{
2336        struct tc35815_regs __iomem *tr =
2337                (struct tc35815_regs __iomem *)dev->base_addr;
2338        int i;
2339        /* reset the controller */
2340        tc_writel(MAC_Reset, &tr->MAC_Ctl);
2341        udelay(4); /* 3200ns */
2342        i = 0;
2343        while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
2344                if (i++ > 100) {
2345                        printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
2346                        break;
2347                }
2348                mdelay(1);
2349        }
2350        tc_writel(0, &tr->MAC_Ctl);
2351
2352        /* initialize registers to default values */
2353        tc_writel(0, &tr->DMA_Ctl);
2354        tc_writel(0, &tr->TxThrsh);
2355        tc_writel(0, &tr->TxPollCtr);
2356        tc_writel(0, &tr->RxFragSize);
2357        tc_writel(0, &tr->Int_En);
2358        tc_writel(0, &tr->FDA_Bas);
2359        tc_writel(0, &tr->FDA_Lim);
2360        tc_writel(0xffffffff, &tr->Int_Src);    /* Write 1 to clear */
2361        tc_writel(0, &tr->CAM_Ctl);
2362        tc_writel(0, &tr->Tx_Ctl);
2363        tc_writel(0, &tr->Rx_Ctl);
2364        tc_writel(0, &tr->CAM_Ena);
2365        (void)tc_readl(&tr->Miss_Cnt);  /* Read to clear */
2366
2367        /* initialize internal SRAM */
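            /* While DMA_TestMode is set, the CAM_Adr/CAM_Data window gives
             * access to the whole 0x1000-byte internal SRAM, which is
             * cleared one 32-bit word at a time. */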
2368        tc_writel(DMA_TestMode, &tr->DMA_Ctl);
2369        for (i = 0; i < 0x1000; i += 4) {
2370                tc_writel(i, &tr->CAM_Adr);
2371                tc_writel(0, &tr->CAM_Data);
2372        }
2373        tc_writel(0, &tr->DMA_Ctl);
2374}
2375
2376static void tc35815_chip_init(struct net_device *dev)
2377{
2378        struct tc35815_local *lp = netdev_priv(dev);
2379        struct tc35815_regs __iomem *tr =
2380                (struct tc35815_regs __iomem *)dev->base_addr;
2381        unsigned long txctl = TX_CTL_CMD;
2382
2383        /* load station address to CAM */
2384        tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);
2385
2386        /* Enable CAM (broadcast and unicast) */
2387        tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
2388        tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2389
2390        /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */
2391        if (HAVE_DMA_RXALIGN(lp))
2392                tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
2393        else
2394                tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
2395#ifdef TC35815_USE_PACKEDBUFFER
2396        tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize);   /* Packing */
2397#endif
2398        tc_writel(0, &tr->TxPollCtr);   /* Batch mode */
2399        tc_writel(TX_THRESHOLD, &tr->TxThrsh);
2400        tc_writel(INT_EN_CMD, &tr->Int_En);
2401
2402        /* set queues */
2403        tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
2404        tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
2405                  &tr->FDA_Lim);
2406        /*
2407         * Activation method:
2408         * First, enable the MAC Transmitter and the DMA Receive circuits.
2409         * Then enable the DMA Transmitter and the MAC Receive circuits.
2410         */
2411        tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr);      /* start DMA receiver */
2412        tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);     /* start MAC receiver */
2413
2414        /* start MAC transmitter */
2415#ifndef NO_CHECK_CARRIER
2416        /* TX4939 does not have EnLCarr */
2417        if (lp->chiptype == TC35815_TX4939)
2418                txctl &= ~Tx_EnLCarr;
2419#ifdef WORKAROUND_LOSTCAR
2420        /* WORKAROUND: ignore LostCrS in full duplex operation */
2421        if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2422                txctl &= ~Tx_EnLCarr;
2423#endif
2424#endif /* !NO_CHECK_CARRIER */
2425#ifdef GATHER_TXINT
2426        txctl &= ~Tx_EnComp;    /* disable global tx completion int. */
2427#endif
2428        tc_writel(txctl, &tr->Tx_Ctl);
2429}
2430
2431#ifdef CONFIG_PM
2432static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2433{
2434        struct net_device *dev = pci_get_drvdata(pdev);
2435        struct tc35815_local *lp = netdev_priv(dev);
2436        unsigned long flags;
2437
2438        pci_save_state(pdev);
2439        if (!netif_running(dev))
2440                return 0;
2441        netif_device_detach(dev);
2442        if (lp->phy_dev)
2443                phy_stop(lp->phy_dev);
2444        spin_lock_irqsave(&lp->lock, flags);
2445        tc35815_chip_reset(dev);
2446        spin_unlock_irqrestore(&lp->lock, flags);
2447        pci_set_power_state(pdev, PCI_D3hot);
2448        return 0;
2449}
2450
2451static int tc35815_resume(struct pci_dev *pdev)
2452{
2453        struct net_device *dev = pci_get_drvdata(pdev);
2454        struct tc35815_local *lp = netdev_priv(dev);
2455
2456        pci_restore_state(pdev);
2457        if (!netif_running(dev))
2458                return 0;
2459        pci_set_power_state(pdev, PCI_D0);
2460        tc35815_restart(dev);
2461        netif_carrier_off(dev);
2462        if (lp->phy_dev)
2463                phy_start(lp->phy_dev);
2464        netif_device_attach(dev);
2465        return 0;
2466}
2467#endif /* CONFIG_PM */
2468
2469static struct pci_driver tc35815_pci_driver = {
2470        .name           = MODNAME,
2471        .id_table       = tc35815_pci_tbl,
2472        .probe          = tc35815_init_one,
2473        .remove         = __devexit_p(tc35815_remove_one),
2474#ifdef CONFIG_PM
2475        .suspend        = tc35815_suspend,
2476        .resume         = tc35815_resume,
2477#endif
2478};
2479
2480module_param_named(speed, options.speed, int, 0);
2481MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2482module_param_named(duplex, options.duplex, int, 0);
2483MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2484
2485static int __init tc35815_init_module(void)
2486{
2487        return pci_register_driver(&tc35815_pci_driver);
2488}
2489
2490static void __exit tc35815_cleanup_module(void)
2491{
2492        pci_unregister_driver(&tc35815_pci_driver);
2493}
2494
2495module_init(tc35815_init_module);
2496module_exit(tc35815_cleanup_module);
2497
2498MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
2499MODULE_LICENSE("GPL");
2500