linux/drivers/net/ethernet/dec/tulip/dmfe.c
   1/*
   2    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
   3    ethernet driver for Linux.
   4    Copyright (C) 1997  Sten Wang
   5
   6    This program is free software; you can redistribute it and/or
   7    modify it under the terms of the GNU General Public License
   8    as published by the Free Software Foundation; either version 2
   9    of the License, or (at your option) any later version.
  10
  11    This program is distributed in the hope that it will be useful,
  12    but WITHOUT ANY WARRANTY; without even the implied warranty of
  13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14    GNU General Public License for more details.
  15
  16    DAVICOM Web-Site: www.davicom.com.tw
  17
  18    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
  19    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
  20
  21    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
  22
  23    Marcelo Tosatti <marcelo@conectiva.com.br> :
  24    Made it compile in 2.3 (device to net_device)
  25
  26    Alan Cox <alan@lxorguk.ukuu.org.uk> :
  27    Cleaned up for kernel merge.
  28    Removed the back compatibility support
  29    Reformatted, fixing spelling etc as I went
  30    Removed IRQ 0-15 assumption
  31
  32    Jeff Garzik <jgarzik@pobox.com> :
  33    Updated to use new PCI driver API.
  34    Resource usage cleanups.
  35    Report driver version to user.
  36
  37    Tobias Ringstrom <tori@unhappy.mine.nu> :
  38    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
  39    Andrew Morton and Frank Davis for the SMP safety fixes.
  40
  41    Vojtech Pavlik <vojtech@suse.cz> :
   42    Cleaned up pointer arithmetic.
  43    Fixed a lot of 64bit issues.
  44    Cleaned up printk()s a bit.
  45    Fixed some obvious big endian problems.
  46
  47    Tobias Ringstrom <tori@unhappy.mine.nu> :
  48    Use time_after for jiffies calculation.  Added ethtool
  49    support.  Updated PCI resource allocation.  Do not
  50    forget to unmap PCI mapped skbs.
  51
  52    Alan Cox <alan@lxorguk.ukuu.org.uk>
  53    Added new PCI identifiers provided by Clear Zhang at ALi
  54    for their 1563 ethernet device.
  55
  56    TODO
  57
  58    Check on 64 bit boxes.
  59    Check and fix on big endian boxes.
  60
  61    Test and make sure PCI latency is now correct for all cases.
  62*/
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#define DRV_NAME        "dmfe"
  67#define DRV_VERSION     "1.36.4"
  68#define DRV_RELDATE     "2002-01-17"
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/ptrace.h>
  75#include <linux/errno.h>
  76#include <linux/ioport.h>
  77#include <linux/interrupt.h>
  78#include <linux/pci.h>
  79#include <linux/dma-mapping.h>
  80#include <linux/init.h>
  81#include <linux/netdevice.h>
  82#include <linux/etherdevice.h>
  83#include <linux/ethtool.h>
  84#include <linux/skbuff.h>
  85#include <linux/delay.h>
  86#include <linux/spinlock.h>
  87#include <linux/crc32.h>
  88#include <linux/bitops.h>
  89
  90#include <asm/processor.h>
  91#include <asm/io.h>
  92#include <asm/dma.h>
  93#include <linux/uaccess.h>
  94#include <asm/irq.h>
  95
  96#ifdef CONFIG_TULIP_DM910X
  97#include <linux/of.h>
  98#endif
  99
 100
 101/* Board/System/Debug information/definition ---------------- */
 102#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
 103#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
 104#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
 105#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
 106
 107#define DM9102_IO_SIZE  0x80
 108#define DM9102A_IO_SIZE 0x100
 109#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
 110#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
 111#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
 112#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)      /* Max TX packet count */
 113#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)      /* TX wakeup count */
 114#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
 115#define TX_BUF_ALLOC    0x600
 116#define RX_ALLOC_SIZE   0x620
 117#define DM910X_RESET    1
 118#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
 119#define CR6_DEFAULT     0x00080000      /* HD */
 120#define CR7_DEFAULT     0x180c1
 121#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
 122#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
 123#define MAX_PACKET_SIZE 1514
 124#define DMFE_MAX_MULTICAST 14
 125#define RX_COPY_SIZE    100
 126#define MAX_CHECK_PACKET 0x8000
 127#define DM9801_NOISE_FLOOR 8
 128#define DM9802_NOISE_FLOOR 5
 129
 130#define DMFE_WOL_LINKCHANGE     0x20000000
 131#define DMFE_WOL_SAMPLEPACKET   0x10000000
 132#define DMFE_WOL_MAGICPACKET    0x08000000
 133
 134
 135#define DMFE_10MHF      0
 136#define DMFE_100MHF     1
 137#define DMFE_10MFD      4
 138#define DMFE_100MFD     5
 139#define DMFE_AUTO       8
 140#define DMFE_1M_HPNA    0x10
 141
 142#define DMFE_TXTH_72    0x400000        /* TX TH 72 byte */
 143#define DMFE_TXTH_96    0x404000        /* TX TH 96 byte */
 144#define DMFE_TXTH_128   0x0000          /* TX TH 128 byte */
 145#define DMFE_TXTH_256   0x4000          /* TX TH 256 byte */
 146#define DMFE_TXTH_512   0x8000          /* TX TH 512 byte */
 147#define DMFE_TXTH_1K    0xC000          /* TX TH 1K  byte */
 148
 149#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
  150#define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time 1.5 s */
  151#define DMFE_TX_KICK    (HZ/2)  /* tx packet kick-out time 0.5 s */
 152
 153#define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
 154#define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
 155#define dr32(reg)       ioread32(ioaddr + (reg))
 156#define dr16(reg)       ioread16(ioaddr + (reg))
 157#define dr8(reg)        ioread8(ioaddr + (reg))
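
/*
 * Note: these register accessors expect a local "void __iomem *ioaddr"
 * variable to be in scope.  Each routine below derives it from db->ioaddr
 * before touching the chip, e.g.:
 *
 *      struct dmfe_board_info *db = netdev_priv(dev);
 *      void __iomem *ioaddr = db->ioaddr;
 *
 *      db->cr5_data = dr32(DCR5);
 *      dw32(DCR7, db->cr7_data);
 */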
 158
 159#define DMFE_DBUG(dbug_now, msg, value)                 \
 160        do {                                            \
 161                if (dmfe_debug || (dbug_now))           \
 162                        pr_err("%s %lx\n",              \
 163                               (msg), (long) (value));  \
 164        } while (0)
 165
  166#define SHOW_MEDIA_TYPE(mode)                           \
  167        pr_info("Change Speed to %sMbps %s duplex\n",   \
  168                (mode & 1) ? "100":"10",                \
  169                (mode & 4) ? "full":"half");
 170
 171
 172/* CR9 definition: SROM/MII */
 173#define CR9_SROM_READ   0x4800
 174#define CR9_SRCS        0x1
 175#define CR9_SRCLK       0x2
 176#define CR9_CRDOUT      0x8
 177#define SROM_DATA_0     0x0
 178#define SROM_DATA_1     0x4
 179#define PHY_DATA_1      0x20000
 180#define PHY_DATA_0      0x00000
 181#define MDCLKH          0x10000
 182
 183#define PHY_POWER_DOWN  0x800
 184
 185#define SROM_V41_CODE   0x14
 186
 187#define __CHK_IO_SIZE(pci_id, dev_rev) \
 188 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
 189        DM9102A_IO_SIZE: DM9102_IO_SIZE)
 190
 191#define CHK_IO_SIZE(pci_dev) \
 192        (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
 193        (pci_dev)->revision))
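
/*
 * CHK_IO_SIZE() rebuilds the 32-bit chip id (device in the high word,
 * vendor in the low word) and returns the expected I/O window size:
 * 256 bytes (DM9102A_IO_SIZE) for the DM9132 and for parts with
 * revision >= 0x30, otherwise 128 bytes (DM9102_IO_SIZE).
 * dmfe_init_one() checks this against pci_resource_len() before probing.
 */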
 194
 195/* Structure/enum declaration ------------------------------- */
 196struct tx_desc {
 197        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
 198        char *tx_buf_ptr;               /* Data for us */
 199        struct tx_desc *next_tx_desc;
 200} __attribute__(( aligned(32) ));
 201
 202struct rx_desc {
 203        __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
 204        struct sk_buff *rx_skb_ptr;     /* Data for us */
 205        struct rx_desc *next_rx_desc;
 206} __attribute__(( aligned(32) ));
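
/*
 * Only the first four little-endian words (tdes0..3 / rdes0..3) are read
 * by the DM910x DMA engine; the remaining members are driver bookkeeping.
 * The aligned(32) attribute gives both structs the same fixed 32-byte
 * size, which dmfe_descriptor_init() relies on when it carves the Rx ring
 * out of the same DMA pool directly behind the Tx ring.
 */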
 207
 208struct dmfe_board_info {
 209        u32 chip_id;                    /* Chip vendor/Device ID */
 210        u8 chip_revision;               /* Chip revision */
 211        struct net_device *next_dev;    /* next device */
 212        struct pci_dev *pdev;           /* PCI device */
 213        spinlock_t lock;
 214
 215        void __iomem *ioaddr;           /* I/O base address */
 216        u32 cr0_data;
 217        u32 cr5_data;
 218        u32 cr6_data;
 219        u32 cr7_data;
 220        u32 cr15_data;
 221
 222        /* pointer for memory physical address */
 223        dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
 224        dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
 225        dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
 226        dma_addr_t first_tx_desc_dma;
 227        dma_addr_t first_rx_desc_dma;
 228
 229        /* descriptor pointer */
 230        unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
 231        unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
 232        unsigned char *desc_pool_ptr;   /* descriptor pool memory */
 233        struct tx_desc *first_tx_desc;
 234        struct tx_desc *tx_insert_ptr;
 235        struct tx_desc *tx_remove_ptr;
 236        struct rx_desc *first_rx_desc;
 237        struct rx_desc *rx_insert_ptr;
  238        struct rx_desc *rx_ready_ptr;   /* next received packet pointer */
 239        unsigned long tx_packet_cnt;    /* transmitted packet count */
 240        unsigned long tx_queue_cnt;     /* wait to send packet count */
 241        unsigned long rx_avail_cnt;     /* available rx descriptor count */
  242        unsigned long interval_rx_cnt;  /* rx packet count per timer callback */
 243
 244        u16 HPNA_command;               /* For HPNA register 16 */
 245        u16 HPNA_timer;                 /* For HPNA remote device check */
 246        u16 dbug_cnt;
 247        u16 NIC_capability;             /* NIC media capability */
 248        u16 PHY_reg4;                   /* Saved Phyxcer register 4 value */
 249
 250        u8 HPNA_present;                /* 0:none, 1:DM9801, 2:DM9802 */
 251        u8 chip_type;                   /* Keep DM9102A chip type */
  252        u8 media_mode;                  /* user specified media mode */
  253        u8 op_mode;                     /* actual working media mode */
 254        u8 phy_addr;
 255        u8 wait_reset;                  /* Hardware failed, need to reset */
 256        u8 dm910x_chk_mode;             /* Operating mode check */
 257        u8 first_in_callback;           /* Flag to record state */
 258        u8 wol_mode;                    /* user WOL settings */
 259        struct timer_list timer;
 260
 261        /* Driver defined statistic counter */
 262        unsigned long tx_fifo_underrun;
 263        unsigned long tx_loss_carrier;
 264        unsigned long tx_no_carrier;
 265        unsigned long tx_late_collision;
 266        unsigned long tx_excessive_collision;
 267        unsigned long tx_jabber_timeout;
 268        unsigned long reset_count;
 269        unsigned long reset_cr8;
 270        unsigned long reset_fatal;
 271        unsigned long reset_TXtimeout;
 272
 273        /* NIC SROM data */
 274        unsigned char srom[128];
 275};
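
/*
 * One dmfe_board_info lives in the netdev private area (netdev_priv()).
 * db->lock serialises the interrupt handler, the transmit path, the
 * filter setup and the periodic timer, all of which take it with
 * spin_lock_irqsave().
 */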
 276
 277enum dmfe_offsets {
 278        DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
 279        DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
 280        DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
 281        DCR15 = 0x78
 282};
 283
 284enum dmfe_CR6_bits {
 285        CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
 286        CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
 287        CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
 288};
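
/*
 * CR6 is the operation-mode register: receive/transmit start
 * (CR6_RXSC/CR6_TXSC), promiscuous (CR6_PM), pass-all-multicast
 * (CR6_PAM), full duplex (CR6_FDM) and store-and-forward (CR6_SFT).
 * The driver always rewrites it through update_cr6(), which stops the
 * Tx/Rx engines before loading the new value.
 */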
 289
 290/* Global variable declaration ----------------------------- */
 291static int printed_version;
 292static const char version[] =
 293        "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
 294
 295static int dmfe_debug;
 296static unsigned char dmfe_media_mode = DMFE_AUTO;
 297static u32 dmfe_cr6_user_set;
 298
 299/* For module input parameter */
 300static int debug;
 301static u32 cr6set;
 302static unsigned char mode = 8;
 303static u8 chkmode = 1;
 304static u8 HPNA_mode;            /* Default: Low Power/High Speed */
 305static u8 HPNA_rx_cmd;          /* Default: Disable Rx remote command */
 306static u8 HPNA_tx_cmd;          /* Default: Don't issue remote command */
 307static u8 HPNA_NoiseFloor;      /* Default: HPNA NoiseFloor */
 308static u8 SF_mode;              /* Special Function: 1:VLAN, 2:RX Flow Control
 309                                   4: TX pause packet */
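
/*
 * These variables are presumably exported as module parameters later in
 * the file (module_param(), not shown in this excerpt), so media mode,
 * CR6 override and debug level could be chosen at load time, for example
 * (illustrative only):
 *
 *      modprobe dmfe mode=1 cr6set=0 debug=1
 */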
 310
 311
 312/* function declaration ------------------------------------- */
 313static int dmfe_open(struct net_device *);
 314static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
 315static int dmfe_stop(struct net_device *);
 316static void dmfe_set_filter_mode(struct net_device *);
 317static const struct ethtool_ops netdev_ethtool_ops;
 318static u16 read_srom_word(void __iomem *, int);
 319static irqreturn_t dmfe_interrupt(int , void *);
 320#ifdef CONFIG_NET_POLL_CONTROLLER
 321static void poll_dmfe (struct net_device *dev);
 322#endif
 323static void dmfe_descriptor_init(struct net_device *);
 324static void allocate_rx_buffer(struct net_device *);
 325static void update_cr6(u32, void __iomem *);
 326static void send_filter_frame(struct net_device *);
 327static void dm9132_id_table(struct net_device *);
 328static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
 329static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
 330static void dmfe_phy_write_1bit(void __iomem *, u32);
 331static u16 dmfe_phy_read_1bit(void __iomem *);
 332static u8 dmfe_sense_speed(struct dmfe_board_info *);
 333static void dmfe_process_mode(struct dmfe_board_info *);
 334static void dmfe_timer(unsigned long);
 335static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
 336static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
 337static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
 338static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
 339static void dmfe_dynamic_reset(struct net_device *);
 340static void dmfe_free_rxbuffer(struct dmfe_board_info *);
 341static void dmfe_init_dm910x(struct net_device *);
 342static void dmfe_parse_srom(struct dmfe_board_info *);
 343static void dmfe_program_DM9801(struct dmfe_board_info *, int);
 344static void dmfe_program_DM9802(struct dmfe_board_info *);
 345static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
 346static void dmfe_set_phyxcer(struct dmfe_board_info *);
 347
 348/* DM910X network board routine ---------------------------- */
 349
 350static const struct net_device_ops netdev_ops = {
 351        .ndo_open               = dmfe_open,
 352        .ndo_stop               = dmfe_stop,
 353        .ndo_start_xmit         = dmfe_start_xmit,
 354        .ndo_set_rx_mode        = dmfe_set_filter_mode,
 355        .ndo_set_mac_address    = eth_mac_addr,
 356        .ndo_validate_addr      = eth_validate_addr,
 357#ifdef CONFIG_NET_POLL_CONTROLLER
 358        .ndo_poll_controller    = poll_dmfe,
 359#endif
 360};
 361
 362/*
  363 *      Search for a DM910X board, allocate space and register it
 364 */
 365
 366static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 367{
 368        struct dmfe_board_info *db;     /* board information structure */
 369        struct net_device *dev;
 370        u32 pci_pmr;
 371        int i, err;
 372
 373        DMFE_DBUG(0, "dmfe_init_one()", 0);
 374
 375        if (!printed_version++)
 376                pr_info("%s\n", version);
 377
 378        /*
 379         *      SPARC on-board DM910x chips should be handled by the main
 380         *      tulip driver, except for early DM9100s.
 381         */
 382#ifdef CONFIG_TULIP_DM910X
 383        if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
 384            ent->driver_data == PCI_DM9102_ID) {
 385                struct device_node *dp = pci_device_to_OF_node(pdev);
 386
 387                if (dp && of_get_property(dp, "local-mac-address", NULL)) {
 388                        pr_info("skipping on-board DM910x (use tulip)\n");
 389                        return -ENODEV;
 390                }
 391        }
 392#endif
 393
 394        /* Init network device */
 395        dev = alloc_etherdev(sizeof(*db));
 396        if (dev == NULL)
 397                return -ENOMEM;
 398        SET_NETDEV_DEV(dev, &pdev->dev);
 399
 400        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
 401                pr_warn("32-bit PCI DMA not available\n");
 402                err = -ENODEV;
 403                goto err_out_free;
 404        }
 405
 406        /* Enable Master/IO access, Disable memory access */
 407        err = pci_enable_device(pdev);
 408        if (err)
 409                goto err_out_free;
 410
 411        if (!pci_resource_start(pdev, 0)) {
 412                pr_err("I/O base is zero\n");
 413                err = -ENODEV;
 414                goto err_out_disable;
 415        }
 416
 417        if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
 418                pr_err("Allocated I/O size too small\n");
 419                err = -ENODEV;
 420                goto err_out_disable;
 421        }
 422
 423#if 0   /* pci_{enable_device,set_master} sets minimum latency for us now */
 424
 425        /* Set Latency Timer 80h */
 426        /* FIXME: setting values > 32 breaks some SiS 559x stuff.
 427           Need a PCI quirk.. */
 428
 429        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
 430#endif
 431
 432        if (pci_request_regions(pdev, DRV_NAME)) {
 433                pr_err("Failed to request PCI regions\n");
 434                err = -ENODEV;
 435                goto err_out_disable;
 436        }
 437
 438        /* Init system & device */
 439        db = netdev_priv(dev);
 440
 441        /* Allocate Tx/Rx descriptor memory */
 442        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
 443                        DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
 444        if (!db->desc_pool_ptr) {
 445                err = -ENOMEM;
 446                goto err_out_res;
 447        }
 448
 449        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
 450                        TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
 451        if (!db->buf_pool_ptr) {
 452                err = -ENOMEM;
 453                goto err_out_free_desc;
 454        }
 455
 456        db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 457        db->first_tx_desc_dma = db->desc_pool_dma_ptr;
 458        db->buf_pool_start = db->buf_pool_ptr;
 459        db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 460
 461        db->chip_id = ent->driver_data;
 462        /* IO type range. */
 463        db->ioaddr = pci_iomap(pdev, 0, 0);
 464        if (!db->ioaddr) {
 465                err = -ENOMEM;
 466                goto err_out_free_buf;
 467        }
 468
 469        db->chip_revision = pdev->revision;
 470        db->wol_mode = 0;
 471
 472        db->pdev = pdev;
 473
 474        pci_set_drvdata(pdev, dev);
 475        dev->netdev_ops = &netdev_ops;
 476        dev->ethtool_ops = &netdev_ethtool_ops;
 477        netif_carrier_off(dev);
 478        spin_lock_init(&db->lock);
 479
 480        pci_read_config_dword(pdev, 0x50, &pci_pmr);
 481        pci_pmr &= 0x70000;
 482        if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
 483                db->chip_type = 1;      /* DM9102A E3 */
 484        else
 485                db->chip_type = 0;
 486
 487        /* read 64 word srom data */
 488        for (i = 0; i < 64; i++) {
 489                ((__le16 *) db->srom)[i] =
 490                        cpu_to_le16(read_srom_word(db->ioaddr, i));
 491        }
 492
 493        /* Set Node address */
 494        for (i = 0; i < 6; i++)
 495                dev->dev_addr[i] = db->srom[20 + i];
 496
 497        err = register_netdev (dev);
 498        if (err)
 499                goto err_out_unmap;
 500
 501        dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
 502                 ent->driver_data >> 16,
 503                 pci_name(pdev), dev->dev_addr, pdev->irq);
 504
 505        pci_set_master(pdev);
 506
 507        return 0;
 508
 509err_out_unmap:
 510        pci_iounmap(pdev, db->ioaddr);
 511err_out_free_buf:
 512        pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 513                            db->buf_pool_ptr, db->buf_pool_dma_ptr);
 514err_out_free_desc:
 515        pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
 516                            db->desc_pool_ptr, db->desc_pool_dma_ptr);
 517err_out_res:
 518        pci_release_regions(pdev);
 519err_out_disable:
 520        pci_disable_device(pdev);
 521err_out_free:
 522        free_netdev(dev);
 523
 524        return err;
 525}
 526
 527
 528static void dmfe_remove_one(struct pci_dev *pdev)
 529{
 530        struct net_device *dev = pci_get_drvdata(pdev);
 531        struct dmfe_board_info *db = netdev_priv(dev);
 532
 533        DMFE_DBUG(0, "dmfe_remove_one()", 0);
 534
 535        if (dev) {
 536
 537                unregister_netdev(dev);
 538                pci_iounmap(db->pdev, db->ioaddr);
 539                pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
 540                                        DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
 541                                        db->desc_pool_dma_ptr);
 542                pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 543                                        db->buf_pool_ptr, db->buf_pool_dma_ptr);
 544                pci_release_regions(pdev);
 545                free_netdev(dev);       /* free board information */
 546        }
 547
 548        DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
 549}
 550
 551
 552/*
 553 *      Open the interface.
  554 *      The interface is opened whenever "ifconfig" activates it.
 555 */
 556
 557static int dmfe_open(struct net_device *dev)
 558{
 559        struct dmfe_board_info *db = netdev_priv(dev);
 560        const int irq = db->pdev->irq;
 561        int ret;
 562
 563        DMFE_DBUG(0, "dmfe_open", 0);
 564
 565        ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
 566        if (ret)
 567                return ret;
 568
 569        /* system variable init */
 570        db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
 571        db->tx_packet_cnt = 0;
 572        db->tx_queue_cnt = 0;
 573        db->rx_avail_cnt = 0;
 574        db->wait_reset = 0;
 575
 576        db->first_in_callback = 0;
 577        db->NIC_capability = 0xf;       /* All capability*/
 578        db->PHY_reg4 = 0x1e0;
 579
 580        /* CR6 operation mode decision */
 581        if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
 582                (db->chip_revision >= 0x30) ) {
 583                db->cr6_data |= DMFE_TXTH_256;
 584                db->cr0_data = CR0_DEFAULT;
 585                db->dm910x_chk_mode=4;          /* Enter the normal mode */
 586        } else {
 587                db->cr6_data |= CR6_SFT;        /* Store & Forward mode */
 588                db->cr0_data = 0;
 589                db->dm910x_chk_mode = 1;        /* Enter the check mode */
 590        }
 591
 592        /* Initialize DM910X board */
 593        dmfe_init_dm910x(dev);
 594
  595        /* Activate the system interface */
 596        netif_wake_queue(dev);
 597
  598        /* set and activate the timer */
 599        init_timer(&db->timer);
 600        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
 601        db->timer.data = (unsigned long)dev;
 602        db->timer.function = dmfe_timer;
 603        add_timer(&db->timer);
 604
 605        return 0;
 606}
 607
 608
 609/*      Initialize DM910X board
 610 *      Reset DM910X board
 611 *      Initialize TX/Rx descriptor chain structure
 612 *      Send the set-up frame
 613 *      Enable Tx/Rx machine
 614 */
 615
 616static void dmfe_init_dm910x(struct net_device *dev)
 617{
 618        struct dmfe_board_info *db = netdev_priv(dev);
 619        void __iomem *ioaddr = db->ioaddr;
 620
 621        DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
 622
 623        /* Reset DM910x MAC controller */
 624        dw32(DCR0, DM910X_RESET);       /* RESET MAC */
 625        udelay(100);
 626        dw32(DCR0, db->cr0_data);
 627        udelay(5);
 628
 629        /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
 630        db->phy_addr = 1;
 631
  632        /* Parse SROM and media mode */
 633        dmfe_parse_srom(db);
 634        db->media_mode = dmfe_media_mode;
 635
 636        /* RESET Phyxcer Chip by GPR port bit 7 */
 637        dw32(DCR12, 0x180);             /* Let bit 7 output port */
 638        if (db->chip_id == PCI_DM9009_ID) {
 639                dw32(DCR12, 0x80);      /* Issue RESET signal */
 640                mdelay(300);                    /* Delay 300 ms */
 641        }
 642        dw32(DCR12, 0x0);       /* Clear RESET signal */
 643
 644        /* Process Phyxcer Media Mode */
 645        if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
 646                dmfe_set_phyxcer(db);
 647
 648        /* Media Mode Process */
 649        if ( !(db->media_mode & DMFE_AUTO) )
 650                db->op_mode = db->media_mode;   /* Force Mode */
 651
 652        /* Initialize Transmit/Receive descriptor and CR3/4 */
 653        dmfe_descriptor_init(dev);
 654
 655        /* Init CR6 to program DM910x operation */
 656        update_cr6(db->cr6_data, ioaddr);
 657
 658        /* Send setup frame */
 659        if (db->chip_id == PCI_DM9132_ID)
 660                dm9132_id_table(dev);   /* DM9132 */
 661        else
 662                send_filter_frame(dev); /* DM9102/DM9102A */
 663
 664        /* Init CR7, interrupt active bit */
 665        db->cr7_data = CR7_DEFAULT;
 666        dw32(DCR7, db->cr7_data);
 667
 668        /* Init CR15, Tx jabber and Rx watchdog timer */
 669        dw32(DCR15, db->cr15_data);
 670
 671        /* Enable DM910X Tx/Rx function */
 672        db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
 673        update_cr6(db->cr6_data, ioaddr);
 674}
 675
 676
 677/*
 678 *      Hardware start transmission.
 679 *      Send a packet to media from the upper layer.
 680 */
 681
 682static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 683                                         struct net_device *dev)
 684{
 685        struct dmfe_board_info *db = netdev_priv(dev);
 686        void __iomem *ioaddr = db->ioaddr;
 687        struct tx_desc *txptr;
 688        unsigned long flags;
 689
 690        DMFE_DBUG(0, "dmfe_start_xmit", 0);
 691
 692        /* Too large packet check */
 693        if (skb->len > MAX_PACKET_SIZE) {
 694                pr_err("big packet = %d\n", (u16)skb->len);
 695                dev_kfree_skb_any(skb);
 696                return NETDEV_TX_OK;
 697        }
 698
 699        /* Resource flag check */
 700        netif_stop_queue(dev);
 701
 702        spin_lock_irqsave(&db->lock, flags);
 703
  704        /* No Tx resource check; this should never happen normally */
 705        if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
 706                spin_unlock_irqrestore(&db->lock, flags);
 707                pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
 708                return NETDEV_TX_BUSY;
 709        }
 710
 711        /* Disable NIC interrupt */
 712        dw32(DCR7, 0);
 713
 714        /* transmit this packet */
 715        txptr = db->tx_insert_ptr;
 716        skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
 717        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 718
 719        /* Point to next transmit free descriptor */
 720        db->tx_insert_ptr = txptr->next_tx_desc;
 721
 722        /* Transmit Packet Process */
 723        if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
 724                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
 725                db->tx_packet_cnt++;                    /* Ready to send */
 726                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 727                netif_trans_update(dev);                /* saved time stamp */
 728        } else {
 729                db->tx_queue_cnt++;                     /* queue TX packet */
 730                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 731        }
 732
 733        /* Tx resource check */
 734        if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
 735                netif_wake_queue(dev);
 736
 737        /* Restore CR7 to enable interrupt */
 738        spin_unlock_irqrestore(&db->lock, flags);
 739        dw32(DCR7, db->cr7_data);
 740
 741        /* free this SKB */
 742        dev_consume_skb_any(skb);
 743
 744        return NETDEV_TX_OK;
 745}
 746
 747
 748/*
 749 *      Stop the interface.
  750 *      The interface is stopped when it is brought down.
 751 */
 752
 753static int dmfe_stop(struct net_device *dev)
 754{
 755        struct dmfe_board_info *db = netdev_priv(dev);
 756        void __iomem *ioaddr = db->ioaddr;
 757
 758        DMFE_DBUG(0, "dmfe_stop", 0);
 759
 760        /* disable system */
 761        netif_stop_queue(dev);
 762
  763        /* delete timer */
 764        del_timer_sync(&db->timer);
 765
 766        /* Reset & stop DM910X board */
 767        dw32(DCR0, DM910X_RESET);
 768        udelay(100);
 769        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
 770
 771        /* free interrupt */
 772        free_irq(db->pdev->irq, dev);
 773
 774        /* free allocated rx buffer */
 775        dmfe_free_rxbuffer(db);
 776
 777#if 0
 778        /* show statistic counter */
 779        printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
 780               db->tx_fifo_underrun, db->tx_excessive_collision,
 781               db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
 782               db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
 783               db->reset_fatal, db->reset_TXtimeout);
 784#endif
 785
 786        return 0;
 787}
 788
 789
 790/*
  791 *      DM9102 interrupt handler
  792 *      Receive packets and pass them to the upper layer, free transmitted packets
 793 */
 794
 795static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 796{
 797        struct net_device *dev = dev_id;
 798        struct dmfe_board_info *db = netdev_priv(dev);
 799        void __iomem *ioaddr = db->ioaddr;
 800        unsigned long flags;
 801
 802        DMFE_DBUG(0, "dmfe_interrupt()", 0);
 803
 804        spin_lock_irqsave(&db->lock, flags);
 805
 806        /* Got DM910X status */
 807        db->cr5_data = dr32(DCR5);
 808        dw32(DCR5, db->cr5_data);
 809        if ( !(db->cr5_data & 0xc1) ) {
 810                spin_unlock_irqrestore(&db->lock, flags);
 811                return IRQ_HANDLED;
 812        }
 813
 814        /* Disable all interrupt in CR7 to solve the interrupt edge problem */
 815        dw32(DCR7, 0);
 816
 817        /* Check system status */
 818        if (db->cr5_data & 0x2000) {
  819                /* system bus error happened */
 820                DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
 821                db->reset_fatal++;
 822                db->wait_reset = 1;     /* Need to RESET */
 823                spin_unlock_irqrestore(&db->lock, flags);
 824                return IRQ_HANDLED;
 825        }
 826
  827        /* Receive incoming packets */
 828        if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
 829                dmfe_rx_packet(dev, db);
 830
 831        /* reallocate rx descriptor buffer */
 832        if (db->rx_avail_cnt<RX_DESC_CNT)
 833                allocate_rx_buffer(dev);
 834
 835        /* Free the transmitted descriptor */
 836        if ( db->cr5_data & 0x01)
 837                dmfe_free_tx_pkt(dev, db);
 838
 839        /* Mode Check */
 840        if (db->dm910x_chk_mode & 0x2) {
 841                db->dm910x_chk_mode = 0x4;
 842                db->cr6_data |= 0x100;
 843                update_cr6(db->cr6_data, ioaddr);
 844        }
 845
 846        /* Restore CR7 to enable interrupt mask */
 847        dw32(DCR7, db->cr7_data);
 848
 849        spin_unlock_irqrestore(&db->lock, flags);
 850        return IRQ_HANDLED;
 851}
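
/*
 * CR5 (status) bits tested above, following the DEC 21x4x-compatible
 * CSR layout this chip appears to use: bit 0 = transmit complete,
 * bit 6 = receive complete, bit 7 = receive buffer unavailable (hence
 * the 0xc1 "anything pending?" mask) and bit 13 = fatal bus error,
 * which is handled by flagging wait_reset for the timer routine.
 */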
 852
 853
 854#ifdef CONFIG_NET_POLL_CONTROLLER
 855/*
 856 * Polling 'interrupt' - used by things like netconsole to send skbs
 857 * without having to re-enable interrupts. It's not called while
 858 * the interrupt routine is executing.
 859 */
 860
 861static void poll_dmfe (struct net_device *dev)
 862{
 863        struct dmfe_board_info *db = netdev_priv(dev);
 864        const int irq = db->pdev->irq;
 865
 866        /* disable_irq here is not very nice, but with the lockless
 867           interrupt handler we have no other choice. */
 868        disable_irq(irq);
 869        dmfe_interrupt (irq, dev);
 870        enable_irq(irq);
 871}
 872#endif
 873
 874/*
 875 *      Free TX resource after TX complete
 876 */
 877
 878static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
 879{
 880        struct tx_desc *txptr;
 881        void __iomem *ioaddr = db->ioaddr;
 882        u32 tdes0;
 883
 884        txptr = db->tx_remove_ptr;
 885        while(db->tx_packet_cnt) {
 886                tdes0 = le32_to_cpu(txptr->tdes0);
 887                if (tdes0 & 0x80000000)
 888                        break;
 889
  890                /* A packet transmission completed */
 891                db->tx_packet_cnt--;
 892                dev->stats.tx_packets++;
 893
 894                /* Transmit statistic counter */
 895                if ( tdes0 != 0x7fffffff ) {
 896                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
 897                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
 898                        if (tdes0 & TDES0_ERR_MASK) {
 899                                dev->stats.tx_errors++;
 900
 901                                if (tdes0 & 0x0002) {   /* UnderRun */
 902                                        db->tx_fifo_underrun++;
 903                                        if ( !(db->cr6_data & CR6_SFT) ) {
 904                                                db->cr6_data = db->cr6_data | CR6_SFT;
 905                                                update_cr6(db->cr6_data, ioaddr);
 906                                        }
 907                                }
 908                                if (tdes0 & 0x0100)
 909                                        db->tx_excessive_collision++;
 910                                if (tdes0 & 0x0200)
 911                                        db->tx_late_collision++;
 912                                if (tdes0 & 0x0400)
 913                                        db->tx_no_carrier++;
 914                                if (tdes0 & 0x0800)
 915                                        db->tx_loss_carrier++;
 916                                if (tdes0 & 0x4000)
 917                                        db->tx_jabber_timeout++;
 918                        }
 919                }
 920
 921                txptr = txptr->next_tx_desc;
 922        }/* End of while */
 923
 924        /* Update TX remove pointer to next */
 925        db->tx_remove_ptr = txptr;
 926
 927        /* Send the Tx packet in queue */
 928        if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
 929                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
 930                db->tx_packet_cnt++;                    /* Ready to send */
 931                db->tx_queue_cnt--;
 932                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 933                netif_trans_update(dev);                /* saved time stamp */
 934        }
 935
 936        /* Resource available check */
 937        if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
 938                netif_wake_queue(dev);  /* Active upper layer, send again */
 939}
 940
 941
 942/*
  943 *      Calculate the CRC value of the Rx packet
 944 *      flag =  1 : return the reverse CRC (for the received packet CRC)
 945 *              0 : return the normal CRC (for Hash Table index)
 946 */
 947
 948static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
 949{
 950        u32 crc = crc32(~0, Data, Len);
 951        if (flag) crc = ~crc;
 952        return crc;
 953}
 954
 955
 956/*
  957 *      Receive incoming packets and pass them to the upper layer
 958 */
 959
 960static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
 961{
 962        struct rx_desc *rxptr;
 963        struct sk_buff *skb, *newskb;
 964        int rxlen;
 965        u32 rdes0;
 966
 967        rxptr = db->rx_ready_ptr;
 968
 969        while(db->rx_avail_cnt) {
 970                rdes0 = le32_to_cpu(rxptr->rdes0);
 971                if (rdes0 & 0x80000000) /* packet owner check */
 972                        break;
 973
 974                db->rx_avail_cnt--;
 975                db->interval_rx_cnt++;
 976
 977                pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
 978                                 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
 979
 980                if ( (rdes0 & 0x300) != 0x300) {
 981                        /* A packet without First/Last flag */
 982                        /* reuse this SKB */
 983                        DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
 984                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 985                } else {
 986                        /* A packet with First/Last flag */
 987                        rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
 988
 989                        /* error summary bit check */
 990                        if (rdes0 & 0x8000) {
  991                                /* This is an error packet */
 992                                dev->stats.rx_errors++;
 993                                if (rdes0 & 1)
 994                                        dev->stats.rx_fifo_errors++;
 995                                if (rdes0 & 2)
 996                                        dev->stats.rx_crc_errors++;
 997                                if (rdes0 & 0x80)
 998                                        dev->stats.rx_length_errors++;
 999                        }
1000
1001                        if ( !(rdes0 & 0x8000) ||
1002                                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
1003                                skb = rxptr->rx_skb_ptr;
1004
 1005                                /* Check the received packet CRC if required */
1006                                if ( (db->dm910x_chk_mode & 1) &&
1007                                        (cal_CRC(skb->data, rxlen, 1) !=
1008                                        (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
 1009                                        /* Found an erroneous received packet */
1010                                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1011                                        db->dm910x_chk_mode = 3;
1012                                } else {
1013                                        /* Good packet, send to upper layer */
 1014                                        /* Short packets use a new SKB */
1015                                        if ((rxlen < RX_COPY_SIZE) &&
1016                                                ((newskb = netdev_alloc_skb(dev, rxlen + 2))
1017                                                != NULL)) {
1018
1019                                                skb = newskb;
1020                                                /* size less than COPY_SIZE, allocate a rxlen SKB */
1021                                                skb_reserve(skb, 2); /* 16byte align */
1022                                                skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1023                                                          skb_put(skb, rxlen),
1024                                                                          rxlen);
1025                                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1026                                        } else
1027                                                skb_put(skb, rxlen);
1028
1029                                        skb->protocol = eth_type_trans(skb, dev);
1030                                        netif_rx(skb);
1031                                        dev->stats.rx_packets++;
1032                                        dev->stats.rx_bytes += rxlen;
1033                                }
1034                        } else {
 1035                                /* Reuse the SKB when the packet has an error */
1036                                DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1037                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1038                        }
1039                }
1040
1041                rxptr = rxptr->next_rx_desc;
1042        }
1043
1044        db->rx_ready_ptr = rxptr;
1045}
1046
1047/*
1048 * Set DM910X multicast address
1049 */
1050
1051static void dmfe_set_filter_mode(struct net_device *dev)
1052{
1053        struct dmfe_board_info *db = netdev_priv(dev);
1054        unsigned long flags;
1055        int mc_count = netdev_mc_count(dev);
1056
1057        DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1058        spin_lock_irqsave(&db->lock, flags);
1059
1060        if (dev->flags & IFF_PROMISC) {
1061                DMFE_DBUG(0, "Enable PROM Mode", 0);
1062                db->cr6_data |= CR6_PM | CR6_PBF;
1063                update_cr6(db->cr6_data, db->ioaddr);
1064                spin_unlock_irqrestore(&db->lock, flags);
1065                return;
1066        }
1067
1068        if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1069                DMFE_DBUG(0, "Pass all multicast address", mc_count);
1070                db->cr6_data &= ~(CR6_PM | CR6_PBF);
1071                db->cr6_data |= CR6_PAM;
1072                spin_unlock_irqrestore(&db->lock, flags);
1073                return;
1074        }
1075
1076        DMFE_DBUG(0, "Set multicast address", mc_count);
1077        if (db->chip_id == PCI_DM9132_ID)
1078                dm9132_id_table(dev);   /* DM9132 */
1079        else
1080                send_filter_frame(dev); /* DM9102/DM9102A */
1081        spin_unlock_irqrestore(&db->lock, flags);
1082}
1083
1084/*
 1085 *      Ethtool interface
1086 */
1087
1088static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1089                               struct ethtool_drvinfo *info)
1090{
1091        struct dmfe_board_info *np = netdev_priv(dev);
1092
1093        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1094        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1095        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1096}
1097
1098static int dmfe_ethtool_set_wol(struct net_device *dev,
1099                                struct ethtool_wolinfo *wolinfo)
1100{
1101        struct dmfe_board_info *db = netdev_priv(dev);
1102
1103        if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1104                                WAKE_ARP | WAKE_MAGICSECURE))
1105                   return -EOPNOTSUPP;
1106
1107        db->wol_mode = wolinfo->wolopts;
1108        return 0;
1109}
1110
1111static void dmfe_ethtool_get_wol(struct net_device *dev,
1112                                 struct ethtool_wolinfo *wolinfo)
1113{
1114        struct dmfe_board_info *db = netdev_priv(dev);
1115
1116        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1117        wolinfo->wolopts = db->wol_mode;
1118}
1119
1120
1121static const struct ethtool_ops netdev_ethtool_ops = {
1122        .get_drvinfo            = dmfe_ethtool_get_drvinfo,
1123        .get_link               = ethtool_op_get_link,
1124        .set_wol                = dmfe_ethtool_set_wol,
1125        .get_wol                = dmfe_ethtool_get_wol,
1126};
1127
1128/*
1129 *      A periodic timer routine
1130 *      Dynamic media sense, allocate Rx buffer...
1131 */
1132
1133static void dmfe_timer(unsigned long data)
1134{
1135        struct net_device *dev = (struct net_device *)data;
1136        struct dmfe_board_info *db = netdev_priv(dev);
1137        void __iomem *ioaddr = db->ioaddr;
1138        u32 tmp_cr8;
1139        unsigned char tmp_cr12;
1140        unsigned long flags;
1141
1142        int link_ok, link_ok_phy;
1143
1144        DMFE_DBUG(0, "dmfe_timer()", 0);
1145        spin_lock_irqsave(&db->lock, flags);
1146
 1147        /* Media mode process when link was OK before entering this routine */
1148        if (db->first_in_callback == 0) {
1149                db->first_in_callback = 1;
1150                if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1151                        db->cr6_data &= ~0x40000;
1152                        update_cr6(db->cr6_data, ioaddr);
1153                        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1154                        db->cr6_data |= 0x40000;
1155                        update_cr6(db->cr6_data, ioaddr);
1156                        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1157                        add_timer(&db->timer);
1158                        spin_unlock_irqrestore(&db->lock, flags);
1159                        return;
1160                }
1161        }
1162
1163
1164        /* Operating Mode Check */
1165        if ( (db->dm910x_chk_mode & 0x1) &&
1166                (dev->stats.rx_packets > MAX_CHECK_PACKET) )
1167                db->dm910x_chk_mode = 0x4;
1168
1169        /* Dynamic reset DM910X : system error or transmit time-out */
1170        tmp_cr8 = dr32(DCR8);
1171        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1172                db->reset_cr8++;
1173                db->wait_reset = 1;
1174        }
1175        db->interval_rx_cnt = 0;
1176
1177        /* TX polling kick monitor */
1178        if ( db->tx_packet_cnt &&
1179             time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1180                dw32(DCR1, 0x1);   /* Tx polling again */
1181
1182                /* TX Timeout */
1183                if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1184                        db->reset_TXtimeout++;
1185                        db->wait_reset = 1;
1186                        dev_warn(&dev->dev, "Tx timeout - resetting\n");
1187                }
1188        }
1189
1190        if (db->wait_reset) {
1191                DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1192                db->reset_count++;
1193                dmfe_dynamic_reset(dev);
1194                db->first_in_callback = 0;
1195                db->timer.expires = DMFE_TIMER_WUT;
1196                add_timer(&db->timer);
1197                spin_unlock_irqrestore(&db->lock, flags);
1198                return;
1199        }
1200
1201        /* Link status check, Dynamic media type change */
1202        if (db->chip_id == PCI_DM9132_ID)
1203                tmp_cr12 = dr8(DCR9 + 3);       /* DM9132 */
1204        else
1205                tmp_cr12 = dr8(DCR12);          /* DM9102/DM9102A */
1206
1207        if ( ((db->chip_id == PCI_DM9102_ID) &&
1208                (db->chip_revision == 0x30)) ||
1209                ((db->chip_id == PCI_DM9132_ID) &&
1210                (db->chip_revision == 0x10)) ) {
1211                /* DM9102A Chip */
1212                if (tmp_cr12 & 2)
1213                        link_ok = 0;
1214                else
1215                        link_ok = 1;
1216        }
1217        else
 1218                /* 0x43 is used instead of 0x3 because bit 6 should represent
1219                        link status of external PHY */
1220                link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1221
1222
 1223        /* If the chip reports that the link has failed, it could be because the
 1224                external PHY link status pin is not connected correctly to the chip.
 1225                To be sure, ask the PHY too.
 1226        */
1227
 1228        /* need a dummy read because of PHY's register latch */
1229        dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1230        link_ok_phy = (dmfe_phy_read (db->ioaddr,
1231                                      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1232
1233        if (link_ok_phy != link_ok) {
1234                DMFE_DBUG (0, "PHY and chip report different link status", 0);
1235                link_ok = link_ok | link_ok_phy;
1236        }
1237
1238        if ( !link_ok && netif_carrier_ok(dev)) {
1239                /* Link Failed */
1240                DMFE_DBUG(0, "Link Failed", tmp_cr12);
1241                netif_carrier_off(dev);
1242
1243                /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
 1244                /* Not needed for AUTO or forced 1M Homerun/Longrun modes */
1245                if ( !(db->media_mode & 0x38) )
1246                        dmfe_phy_write(db->ioaddr, db->phy_addr,
1247                                       0, 0x1000, db->chip_id);
1248
1249                /* AUTO mode, if INT phyxcer link failed, select EXT device */
1250                if (db->media_mode & DMFE_AUTO) {
 1251                        /* 10/100M link failed, use 1M Home-Net */
1252                        db->cr6_data|=0x00040000;       /* bit18=1, MII */
1253                        db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
1254                        update_cr6(db->cr6_data, ioaddr);
1255                }
1256        } else if (!netif_carrier_ok(dev)) {
1257
 1258                DMFE_DBUG(0, "Link OK", tmp_cr12);
1259
1260                /* Auto Sense Speed */
1261                if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1262                        netif_carrier_on(dev);
1263                        SHOW_MEDIA_TYPE(db->op_mode);
1264                }
1265
1266                dmfe_process_mode(db);
1267        }
1268
1269        /* HPNA remote command check */
1270        if (db->HPNA_command & 0xf00) {
1271                db->HPNA_timer--;
1272                if (!db->HPNA_timer)
1273                        dmfe_HPNA_remote_cmd_chk(db);
1274        }
1275
1276        /* Timer active again */
1277        db->timer.expires = DMFE_TIMER_WUT;
1278        add_timer(&db->timer);
1279        spin_unlock_irqrestore(&db->lock, flags);
1280}
1281
1282
1283/*
1284 *      Dynamic reset the DM910X board
1285 *      Stop DM910X board
1286 *      Free Tx/Rx allocated memory
1287 *      Reset DM910X board
1288 *      Re-initialize DM910X board
1289 */
1290
1291static void dmfe_dynamic_reset(struct net_device *dev)
1292{
1293        struct dmfe_board_info *db = netdev_priv(dev);
1294        void __iomem *ioaddr = db->ioaddr;
1295
1296        DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1297
 1298        /* Stop MAC controller */
1299        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1300        update_cr6(db->cr6_data, ioaddr);
1301        dw32(DCR7, 0);                          /* Disable Interrupt */
1302        dw32(DCR5, dr32(DCR5));
1303
1304        /* Disable upper layer interface */
1305        netif_stop_queue(dev);
1306
1307        /* Free Rx Allocate buffer */
1308        dmfe_free_rxbuffer(db);
1309
1310        /* system variable init */
1311        db->tx_packet_cnt = 0;
1312        db->tx_queue_cnt = 0;
1313        db->rx_avail_cnt = 0;
1314        netif_carrier_off(dev);
1315        db->wait_reset = 0;
1316
1317        /* Re-initialize DM910X board */
1318        dmfe_init_dm910x(dev);
1319
1320        /* Restart upper layer interface */
1321        netif_wake_queue(dev);
1322}
1323
1324
1325/*
1326 *      free all allocated rx buffer
1327 */
1328
1329static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1330{
1331        DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1332
1333        /* free allocated rx buffer */
1334        while (db->rx_avail_cnt) {
1335                dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1336                db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1337                db->rx_avail_cnt--;
1338        }
1339}
1340
1341
1342/*
1343 *      Reuse the SK buffer
1344 */
1345
1346static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1347{
1348        struct rx_desc *rxptr = db->rx_insert_ptr;
1349
1350        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1351                rxptr->rx_skb_ptr = skb;
1352                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1353                            skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1354                wmb();
1355                rxptr->rdes0 = cpu_to_le32(0x80000000);
1356                db->rx_avail_cnt++;
1357                db->rx_insert_ptr = rxptr->next_rx_desc;
1358        } else
1359                DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1360}
1361
1362
1363/*
1364 *      Initialize transmit/Receive descriptor
1365 *      Using Chain structure, and allocate Tx/Rx buffer
1366 */
1367
1368static void dmfe_descriptor_init(struct net_device *dev)
1369{
1370        struct dmfe_board_info *db = netdev_priv(dev);
1371        void __iomem *ioaddr = db->ioaddr;
1372        struct tx_desc *tmp_tx;
1373        struct rx_desc *tmp_rx;
1374        unsigned char *tmp_buf;
1375        dma_addr_t tmp_tx_dma, tmp_rx_dma;
1376        dma_addr_t tmp_buf_dma;
1377        int i;
1378
1379        DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1380
1381        /* tx descriptor start pointer */
1382        db->tx_insert_ptr = db->first_tx_desc;
1383        db->tx_remove_ptr = db->first_tx_desc;
1384        dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
1385
1386        /* rx descriptor start pointer */
1387        db->first_rx_desc = (void *)db->first_tx_desc +
1388                        sizeof(struct tx_desc) * TX_DESC_CNT;
1389
1390        db->first_rx_desc_dma =  db->first_tx_desc_dma +
1391                        sizeof(struct tx_desc) * TX_DESC_CNT;
1392        db->rx_insert_ptr = db->first_rx_desc;
1393        db->rx_ready_ptr = db->first_rx_desc;
1394        dw32(DCR3, db->first_rx_desc_dma);              /* RX DESC address */
1395
1396        /* Init Transmit chain */
1397        tmp_buf = db->buf_pool_start;
1398        tmp_buf_dma = db->buf_pool_dma_start;
1399        tmp_tx_dma = db->first_tx_desc_dma;
1400        for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1401                tmp_tx->tx_buf_ptr = tmp_buf;
1402                tmp_tx->tdes0 = cpu_to_le32(0);
1403                tmp_tx->tdes1 = cpu_to_le32(0x81000000);        /* IC, chain */
1404                tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1405                tmp_tx_dma += sizeof(struct tx_desc);
1406                tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1407                tmp_tx->next_tx_desc = tmp_tx + 1;
1408                tmp_buf = tmp_buf + TX_BUF_ALLOC;
1409                tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1410        }
1411        (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1412        tmp_tx->next_tx_desc = db->first_tx_desc;
1413
1414         /* Init Receive descriptor chain */
1415        tmp_rx_dma=db->first_rx_desc_dma;
1416        for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1417                tmp_rx->rdes0 = cpu_to_le32(0);
1418                tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1419                tmp_rx_dma += sizeof(struct rx_desc);
1420                tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1421                tmp_rx->next_rx_desc = tmp_rx + 1;
1422        }
1423        (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1424        tmp_rx->next_rx_desc = db->first_rx_desc;
1425
1426        /* pre-allocate Rx buffer */
1427        allocate_rx_buffer(dev);
1428}
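
/*
 * Resulting layout: one DMA pool holding TX_DESC_CNT transmit descriptors
 * immediately followed by RX_DESC_CNT receive descriptors, each entry
 * chained to the next through tdes3/rdes3 and the last pointing back to
 * the first.  Transmit data goes through the fixed buf_pool slots
 * (TX_BUF_ALLOC bytes per descriptor); receive data lands in the skbs
 * attached by allocate_rx_buffer()/dmfe_reuse_skb().
 */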
1429
1430
1431/*
1432 *      Update CR6 value
 1433 *      First stop the DM910X, then write the new value and restart
1434 */
1435
1436static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1437{
1438        u32 cr6_tmp;
1439
1440        cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1441        dw32(DCR6, cr6_tmp);
1442        udelay(5);
1443        dw32(DCR6, cr6_data);
1444        udelay(5);
1445}
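
/*
 * The 0x2002 mask is CR6_TXSC | CR6_RXSC, i.e. the Tx and Rx start bits,
 * so the sequence is: stop both engines, wait 5 us, write the complete
 * CR6 value (possibly restarting them), wait again.
 */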
1446
1447
1448/*
1449 *      Send a setup frame for DM9132
 1450 *      This setup frame initializes the DM910X address filter mode
 1451 */
1452
1453static void dm9132_id_table(struct net_device *dev)
1454{
1455        struct dmfe_board_info *db = netdev_priv(dev);
1456        void __iomem *ioaddr = db->ioaddr + 0xc0;
1457        u16 *addrptr = (u16 *)dev->dev_addr;
1458        struct netdev_hw_addr *ha;
1459        u16 i, hash_table[4];
1460
1461        /* Node address */
1462        for (i = 0; i < 3; i++) {
1463                dw16(0, addrptr[i]);
1464                ioaddr += 4;
1465        }
1466
1467        /* Clear Hash Table */
1468        memset(hash_table, 0, sizeof(hash_table));
1469
1470        /* broadcast address */
1471        hash_table[3] = 0x8000;
1472
1473        /* the multicast address in Hash Table : 64 bits */
1474        netdev_for_each_mc_addr(ha, dev) {
1475                u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1476
1477                hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1478        }
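        /*
         * Each multicast address selects one of 64 bits: the CRC of the
         * 6-byte address is reduced to 6 bits, the upper two of which pick
         * one of the four 16-bit words and the lower four the bit within
         * it.  For example, a hash value of 37 would set bit 5 of
         * hash_table[2].  The broadcast entry set above is bit 63.
         */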
1479
1480        /* Write the hash table to MAC MD table */
1481        for (i = 0; i < 4; i++, ioaddr += 4)
1482                dw16(0, hash_table[i]);
1483}
1484
1485
1486/*
1487 *      Send a setup frame for DM9102/DM9102A
1488 *      This setup frame initializes the DM910X address filter mode
1489 */
1490
1491static void send_filter_frame(struct net_device *dev)
1492{
1493        struct dmfe_board_info *db = netdev_priv(dev);
1494        struct netdev_hw_addr *ha;
1495        struct tx_desc *txptr;
1496        u16 * addrptr;
1497        u32 * suptr;
1498        int i;
1499
1500        DMFE_DBUG(0, "send_filter_frame()", 0);
1501
1502        txptr = db->tx_insert_ptr;
1503        suptr = (u32 *) txptr->tx_buf_ptr;
1504
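        /*
         * Build a tulip-style 192-byte setup frame for perfect filtering:
         * 16 address slots of 12 bytes each, where every 16-bit half of a
         * MAC address sits in the low half of a 32-bit word.  Slot 0 is the
         * node address, slot 1 the broadcast address, then the multicast
         * list; unused slots are padded with broadcast below.
         */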
1505        /* Node address */
1506        addrptr = (u16 *) dev->dev_addr;
1507        *suptr++ = addrptr[0];
1508        *suptr++ = addrptr[1];
1509        *suptr++ = addrptr[2];
1510
1511        /* broadcast address */
1512        *suptr++ = 0xffff;
1513        *suptr++ = 0xffff;
1514        *suptr++ = 0xffff;
1515
1516        /* fit the multicast address */
1517        netdev_for_each_mc_addr(ha, dev) {
1518                addrptr = (u16 *) ha->addr;
1519                *suptr++ = addrptr[0];
1520                *suptr++ = addrptr[1];
1521                *suptr++ = addrptr[2];
1522        }
1523
1524        for (i = netdev_mc_count(dev); i < 14; i++) {
1525                *suptr++ = 0xffff;
1526                *suptr++ = 0xffff;
1527                *suptr++ = 0xffff;
1528        }
1529
1530        /* prepare the setup frame */
1531        db->tx_insert_ptr = txptr->next_tx_desc;
1532        txptr->tdes1 = cpu_to_le32(0x890000c0);
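        /*
         * Per the tulip-style Tx descriptor layout this appears to mark the
         * buffer as a setup packet: interrupt-on-completion + setup-frame +
         * chained, with a buffer length of 0xc0 (192) bytes.
         */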
1533
1534        /* Resource Check and Send the setup packet */
1535        if (!db->tx_packet_cnt) {
1536                void __iomem *ioaddr = db->ioaddr;
1537
1538                /* Resource Empty */
1539                db->tx_packet_cnt++;
1540                txptr->tdes0 = cpu_to_le32(0x80000000);
1541                update_cr6(db->cr6_data | 0x2000, ioaddr);
1542                dw32(DCR1, 0x1);        /* Issue Tx polling */
1543                update_cr6(db->cr6_data, ioaddr);
1544                netif_trans_update(dev);
1545        } else
1546                db->tx_queue_cnt++;     /* Put in TX queue */
1547}
1548
1549
1550/*
1551 *      Allocate Rx buffers,
1552 *      filling as many Rx descriptors as possible
1553 */
1554
1555static void allocate_rx_buffer(struct net_device *dev)
1556{
1557        struct dmfe_board_info *db = netdev_priv(dev);
1558        struct rx_desc *rxptr;
1559        struct sk_buff *skb;
1560
1561        rxptr = db->rx_insert_ptr;
1562
1563        while(db->rx_avail_cnt < RX_DESC_CNT) {
1564                if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
1565                        break;
1566                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1567                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1568                                    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
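                /*
                 * Make sure the buffer address in rdes2 is visible before
                 * the OWN bit in rdes0 hands the descriptor to the chip.
                 */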
1569                wmb();
1570                rxptr->rdes0 = cpu_to_le32(0x80000000);
1571                rxptr = rxptr->next_rx_desc;
1572                db->rx_avail_cnt++;
1573        }
1574
1575        db->rx_insert_ptr = rxptr;
1576}
1577
1578static void srom_clk_write(void __iomem *ioaddr, u32 data)
1579{
1580        static const u32 cmd[] = {
1581                CR9_SROM_READ | CR9_SRCS,
1582                CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1583                CR9_SROM_READ | CR9_SRCS
1584        };
1585        int i;
1586
1587        for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1588                dw32(DCR9, data | cmd[i]);
1589                udelay(5);
1590        }
1591}
1592
1593/*
1594 *      Read one data word from the serial ROM
1595 */
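/*
 *      The SROM is accessed 93C46-style through CR9: assert chip select,
 *      clock in the 3-bit read opcode (110b) and the 6-bit word offset,
 *      then clock out the 16 data bits, MSB first.
 */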
1596static u16 read_srom_word(void __iomem *ioaddr, int offset)
1597{
1598        u16 srom_data;
1599        int i;
1600
1601        dw32(DCR9, CR9_SROM_READ);
1602        udelay(5);
1603        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1604        udelay(5);
1605
1606        /* Send the Read Command 110b */
1607        srom_clk_write(ioaddr, SROM_DATA_1);
1608        srom_clk_write(ioaddr, SROM_DATA_1);
1609        srom_clk_write(ioaddr, SROM_DATA_0);
1610
1611        /* Send the offset */
1612        for (i = 5; i >= 0; i--) {
1613                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1614                srom_clk_write(ioaddr, srom_data);
1615        }
1616
1617        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1618        udelay(5);
1619
1620        for (i = 16; i > 0; i--) {
1621                dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1622                udelay(5);
1623                srom_data = (srom_data << 1) |
1624                                ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1625                dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1626                udelay(5);
1627        }
1628
1629        dw32(DCR9, CR9_SROM_READ);
1630        udelay(5);
1631        return srom_data;
1632}
1633
1634
1635/*
1636 *      Auto-sense the media mode
1637 */
1638
1639static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1640{
1641        void __iomem *ioaddr = db->ioaddr;
1642        u8 ErrFlag = 0;
1643        u16 phy_mode;
1644
1645        /* CR6 bit18=0, select 10/100M */
1646        update_cr6(db->cr6_data & ~0x40000, ioaddr);
1647
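        /*
         * MII register 1 is the BMSR; its link-status bit latches a link
         * failure, so it is read twice to get the current state.
         */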
1648        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1649        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1650
1651        if ( (phy_mode & 0x24) == 0x24 ) {
1652                if (db->chip_id == PCI_DM9132_ID)       /* DM9132 */
1653                        phy_mode = dmfe_phy_read(db->ioaddr,
1654                                                 db->phy_addr, 7, db->chip_id) & 0xf000;
1655                else                            /* DM9102/DM9102A */
1656                        phy_mode = dmfe_phy_read(db->ioaddr,
1657                                                 db->phy_addr, 17, db->chip_id) & 0xf000;
1658                switch (phy_mode) {
1659                case 0x1000: db->op_mode = DMFE_10MHF; break;
1660                case 0x2000: db->op_mode = DMFE_10MFD; break;
1661                case 0x4000: db->op_mode = DMFE_100MHF; break;
1662                case 0x8000: db->op_mode = DMFE_100MFD; break;
1663                default: db->op_mode = DMFE_10MHF;
1664                        ErrFlag = 1;
1665                        break;
1666                }
1667        } else {
1668                db->op_mode = DMFE_10MHF;
1669                DMFE_DBUG(0, "Link Failed :", phy_mode);
1670                ErrFlag = 1;
1671        }
1672
1673        return ErrFlag;
1674}
1675
1676
1677/*
1678 *      Set 10/100 phyxcer capability
1679 *      AUTO mode : phyxcer register 4 advertises the NIC capability
1680 *      Force mode: phyxcer register 4 selects the forced media
1681 */
1682
1683static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1684{
1685        void __iomem *ioaddr = db->ioaddr;
1686        u16 phy_reg;
1687
1688        /* Select 10/100M phyxcer */
1689        db->cr6_data &= ~0x40000;
1690        update_cr6(db->cr6_data, ioaddr);
1691
1692        /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1693        if (db->chip_id == PCI_DM9009_ID) {
1694                phy_reg = dmfe_phy_read(db->ioaddr,
1695                                        db->phy_addr, 18, db->chip_id) & ~0x1000;
1696
1697                dmfe_phy_write(db->ioaddr,
1698                               db->phy_addr, 18, phy_reg, db->chip_id);
1699        }
1700
1701        /* Phyxcer capability setting */
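        /*
         * Register 4 is the MII auto-negotiation advertisement register;
         * the 0x01e0 mask covers the 10/100M half/full-duplex ability bits
         * (0x20/0x40/0x80/0x100) that are rebuilt below.
         */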
1702        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1703
1704        if (db->media_mode & DMFE_AUTO) {
1705                /* AUTO Mode */
1706                phy_reg |= db->PHY_reg4;
1707        } else {
1708                /* Force Mode */
1709                switch(db->media_mode) {
1710                case DMFE_10MHF: phy_reg |= 0x20; break;
1711                case DMFE_10MFD: phy_reg |= 0x40; break;
1712                case DMFE_100MHF: phy_reg |= 0x80; break;
1713                case DMFE_100MFD: phy_reg |= 0x100; break;
1714                }
1715                if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1716        }
1717
1718        /* Write new capability to Phyxcer Reg4 */
1719        if ( !(phy_reg & 0x01e0)) {
1720                phy_reg|=db->PHY_reg4;
1721                db->media_mode|=DMFE_AUTO;
1722        }
1723        dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1724
1725        /* Restart Auto-Negotiation */
1726        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1727                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1728        if ( !db->chip_type )
1729                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1730}
1731
1732
1733/*
1734 *      Process op-mode
1735 *      AUTO mode : PHY controller in Auto-negotiation Mode
1736 *      Force mode: PHY controller in force mode when connected to a HUB,
1737 *                      or N-way forced capability when connected to a SWITCH
1738 */
1739
1740static void dmfe_process_mode(struct dmfe_board_info *db)
1741{
1742        u16 phy_reg;
1743
1744        /* Full Duplex Mode Check */
1745        if (db->op_mode & 0x4)
1746                db->cr6_data |= CR6_FDM;        /* Set Full Duplex Bit */
1747        else
1748                db->cr6_data &= ~CR6_FDM;       /* Clear Full Duplex Bit */
1749
1750        /* Transceiver Selection */
1751        if (db->op_mode & 0x10)         /* 1M HomePNA */
1752                db->cr6_data |= 0x40000;/* External MII select */
1753        else
1754                db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1755
1756        update_cr6(db->cr6_data, db->ioaddr);
1757
1758        /* 10/100M phyxcer force mode setup, when needed */
1759        if ( !(db->media_mode & 0x18)) {
1760                /* Force Mode */
1761                phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1762                if ( !(phy_reg & 0x1) ) {
1763                        /* partner without N-Way capability */
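                        /*
                         * Forced-mode BMCR values below: bit 13 selects
                         * 100Mbps, bit 8 selects full duplex.
                         */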
1764                        phy_reg = 0x0;
1765                        switch(db->op_mode) {
1766                        case DMFE_10MHF: phy_reg = 0x0; break;
1767                        case DMFE_10MFD: phy_reg = 0x100; break;
1768                        case DMFE_100MHF: phy_reg = 0x2000; break;
1769                        case DMFE_100MFD: phy_reg = 0x2100; break;
1770                        }
1771                        dmfe_phy_write(db->ioaddr,
1772                                       db->phy_addr, 0, phy_reg, db->chip_id);
1773                        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1774                                mdelay(20);
1775                        dmfe_phy_write(db->ioaddr,
1776                                       db->phy_addr, 0, phy_reg, db->chip_id);
1777                }
1778        }
1779}
1780
1781
1782/*
1783 *      Write a word to Phy register
1784 */
1785
1786static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1787                           u16 phy_data, u32 chip_id)
1788{
1789        u16 i;
1790
1791        if (chip_id == PCI_DM9132_ID) {
1792                dw16(0x80 + offset * 4, phy_data);
1793        } else {
1794                /* DM9102/DM9102A Chip */
1795
1796                /* Send 35 synchronization clocks to the PHY controller */
1797                for (i = 0; i < 35; i++)
1798                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1799
1800                /* Send start command(01) to Phy */
1801                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1802                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1803
1804                /* Send write command(01) to Phy */
1805                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1806                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1807
1808                /* Send Phy address */
1809                for (i = 0x10; i > 0; i = i >> 1)
1810                        dmfe_phy_write_1bit(ioaddr,
1811                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1812
1813                /* Send register address */
1814                for (i = 0x10; i > 0; i = i >> 1)
1815                        dmfe_phy_write_1bit(ioaddr,
1816                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1817
1818                /* write transition */
1819                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1820                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1821
1822                /* Write a word data to PHY controller */
1823                for ( i = 0x8000; i > 0; i >>= 1)
1824                        dmfe_phy_write_1bit(ioaddr,
1825                                            phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1826        }
1827}
1828
1829
1830/*
1831 *      Read a word data from phy register
1832 */
1833
1834static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1835{
1836        int i;
1837        u16 phy_data;
1838
1839        if (chip_id == PCI_DM9132_ID) {
1840                /* DM9132 Chip */
1841                phy_data = dr16(0x80 + offset * 4);
1842        } else {
1843                /* DM9102/DM9102A Chip */
1844
1845                /* Send 35 synchronization clocks to the PHY controller */
1846                for (i = 0; i < 35; i++)
1847                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1848
1849                /* Send start command(01) to Phy */
1850                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1851                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1852
1853                /* Send read command(10) to Phy */
1854                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1855                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1856
1857                /* Send Phy address */
1858                for (i = 0x10; i > 0; i = i >> 1)
1859                        dmfe_phy_write_1bit(ioaddr,
1860                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1861
1862                /* Send register address */
1863                for (i = 0x10; i > 0; i = i >> 1)
1864                        dmfe_phy_write_1bit(ioaddr,
1865                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1866
1867                /* Skip transition state */
1868                dmfe_phy_read_1bit(ioaddr);
1869
1870                /* read 16bit data */
1871                for (phy_data = 0, i = 0; i < 16; i++) {
1872                        phy_data <<= 1;
1873                        phy_data |= dmfe_phy_read_1bit(ioaddr);
1874                }
1875        }
1876
1877        return phy_data;
1878}
1879
1880
1881/*
1882 *      Write one bit data to Phy Controller
1883 */
1884
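/*
 *      The PHY is bit-banged through CR9: the data bit is driven with the
 *      management clock low, MDCLKH then raises the clock so the PHY can
 *      sample the bit on the rising edge, and the clock is dropped again.
 */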
1885static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1886{
1887        dw32(DCR9, phy_data);           /* MII Clock Low */
1888        udelay(1);
1889        dw32(DCR9, phy_data | MDCLKH);  /* MII Clock High */
1890        udelay(1);
1891        dw32(DCR9, phy_data);           /* MII Clock Low */
1892        udelay(1);
1893}
1894
1895
1896/*
1897 *      Read one bit phy data from PHY controller
1898 */
1899
1900static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1901{
1902        u16 phy_data;
1903
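        /*
         * 0x40000 appears to put CR9 in MII read mode; OR-ing in what is
         * presumably the clock bit (0x10000) produces the rising edge,
         * after which the PHY's data-out is sampled from bit 19 of CR9.
         */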
1904        dw32(DCR9, 0x50000);
1905        udelay(1);
1906        phy_data = (dr32(DCR9) >> 19) & 0x1;
1907        dw32(DCR9, 0x40000);
1908        udelay(1);
1909
1910        return phy_data;
1911}
1912
1913
1914/*
1915 *      Parse SROM and media mode
1916 */
1917
1918static void dmfe_parse_srom(struct dmfe_board_info * db)
1919{
1920        char * srom = db->srom;
1921        int dmfe_mode, tmp_reg;
1922
1923        DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1924
1925        /* Init CR15 */
1926        db->cr15_data = CR15_DEFAULT;
1927
1928        /* Check SROM Version */
1929        if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1930                /* SROM V4.01 */
1931                /* Get NIC support media mode */
1932                db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1933                db->PHY_reg4 = 0;
1934                for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1935                        switch( db->NIC_capability & tmp_reg ) {
1936                        case 0x1: db->PHY_reg4 |= 0x0020; break;
1937                        case 0x2: db->PHY_reg4 |= 0x0040; break;
1938                        case 0x4: db->PHY_reg4 |= 0x0080; break;
1939                        case 0x8: db->PHY_reg4 |= 0x0100; break;
1940                        }
1941                }
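                /*
                 * PHY_reg4 now holds the auto-negotiation advertisement
                 * bits corresponding to the media capabilities reported by
                 * the SROM (10M/100M, half/full duplex).
                 */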
1942
1943                /* Media Mode Force or not check */
1944                dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1945                             le32_to_cpup((__le32 *) (srom + 36)));
1946                switch(dmfe_mode) {
1947                case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1948                case 0x2: dmfe_media_mode = DMFE_10MFD; break;  /* 10MFD */
1949                case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1950                case 0x100:
1951                case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1952                }
1953
1954                /* Special Function setting */
1955                /* VLAN function */
1956                if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1957                        db->cr15_data |= 0x40;
1958
1959                /* Flow Control */
1960                if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1961                        db->cr15_data |= 0x400;
1962
1963                /* TX pause packet */
1964                if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1965                        db->cr15_data |= 0x9800;
1966        }
1967
1968        /* Parse HPNA parameter */
1969        db->HPNA_command = 1;
1970
1971        /* Accept remote command or not */
1972        if (HPNA_rx_cmd == 0)
1973                db->HPNA_command |= 0x8000;
1974
1975        /* Issue remote command & operation mode */
1976        if (HPNA_tx_cmd == 1)
1977                switch(HPNA_mode) {     /* Issue Remote Command */
1978                case 0: db->HPNA_command |= 0x0904; break;
1979                case 1: db->HPNA_command |= 0x0a00; break;
1980                case 2: db->HPNA_command |= 0x0506; break;
1981                case 3: db->HPNA_command |= 0x0602; break;
1982                }
1983        else
1984                switch(HPNA_mode) {     /* Don't Issue */
1985                case 0: db->HPNA_command |= 0x0004; break;
1986                case 1: db->HPNA_command |= 0x0000; break;
1987                case 2: db->HPNA_command |= 0x0006; break;
1988                case 3: db->HPNA_command |= 0x0002; break;
1989                }
1990
1991        /* Check DM9801 or DM9802 present or not */
1992        db->HPNA_present = 0;
1993        update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1994        tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1995        if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1996                /* DM9801 or DM9802 present */
1997                db->HPNA_timer = 8;
1998                if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1999                        /* DM9801 HomeRun */
2000                        db->HPNA_present = 1;
2001                        dmfe_program_DM9801(db, tmp_reg);
2002                } else {
2003                        /* DM9802 LongRun */
2004                        db->HPNA_present = 2;
2005                        dmfe_program_DM9802(db);
2006                }
2007        }
2008
2009}
2010
2011
2012/*
2013 *      Init HomeRun DM9801
2014 */
2015
2016static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2017{
2018        uint reg17, reg25;
2019
2020        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2021        switch(HPNA_rev) {
2022        case 0xb900: /* DM9801 E3 */
2023                db->HPNA_command |= 0x1000;
2024                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2025                reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2026                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2027                break;
2028        case 0xb901: /* DM9801 E4 */
2029                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2030                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2031                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2032                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2033                break;
2034        case 0xb902: /* DM9801 E5 */
2035        case 0xb903: /* DM9801 E6 */
2036        default:
2037                db->HPNA_command |= 0x1000;
2038                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2039                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2040                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2041                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2042                break;
2043        }
2044        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2045        dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2046        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2047}
2048
2049
2050/*
2051 *      Init HomeRun DM9802
2052 */
2053
2054static void dmfe_program_DM9802(struct dmfe_board_info * db)
2055{
2056        uint phy_reg;
2057
2058        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2059        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2060        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2061        phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2062        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2063}
2064
2065
2066/*
2067 *      Check remote HPNA power and speed status. If not correct,
2068 *      issue command again.
2069 */
2070
2071static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2072{
2073        uint phy_reg;
2074
2075        /* Get remote device status */
2076        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2077        switch(phy_reg) {
2078        case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2079        case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2080        case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2081        case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2082        }
2083
2084        /* Check whether the remote device status matches our setting */
2085        if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2086                dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2087                               db->chip_id);
2088                db->HPNA_timer=8;
2089        } else
2090                db->HPNA_timer=600;     /* Match, every 10 minutes, check */
2091}
2092
2093
2094
2095static const struct pci_device_id dmfe_pci_tbl[] = {
2096        { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2097        { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2098        { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2099        { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2100        { 0, }
2101};
2102MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2103
2104
2105#ifdef CONFIG_PM
2106static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2107{
2108        struct net_device *dev = pci_get_drvdata(pci_dev);
2109        struct dmfe_board_info *db = netdev_priv(dev);
2110        void __iomem *ioaddr = db->ioaddr;
2111        u32 tmp;
2112
2113        /* Disable upper layer interface */
2114        netif_device_detach(dev);
2115
2116        /* Disable Tx/Rx */
2117        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2118        update_cr6(db->cr6_data, ioaddr);
2119
2120        /* Disable Interrupt */
2121        dw32(DCR7, 0);
2122        dw32(DCR5, dr32(DCR5));
2123
2124        /* Free Rx buffers */
2125        dmfe_free_rxbuffer(db);
2126
2127        /* Enable WOL */
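        /*
         * The WOL control bits live in a device-specific dword at PCI
         * config offset 0x40; read-modify-write it so only the requested
         * wake sources (link change / magic packet) are armed.
         */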
2128        pci_read_config_dword(pci_dev, 0x40, &tmp);
2129        tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2130
2131        if (db->wol_mode & WAKE_PHY)
2132                tmp |= DMFE_WOL_LINKCHANGE;
2133        if (db->wol_mode & WAKE_MAGIC)
2134                tmp |= DMFE_WOL_MAGICPACKET;
2135
2136        pci_write_config_dword(pci_dev, 0x40, tmp);
2137
2138        pci_enable_wake(pci_dev, PCI_D3hot, 1);
2139        pci_enable_wake(pci_dev, PCI_D3cold, 1);
2140
2141        /* Power down device*/
2142        pci_save_state(pci_dev);
2143        pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
2144
2145        return 0;
2146}
2147
2148static int dmfe_resume(struct pci_dev *pci_dev)
2149{
2150        struct net_device *dev = pci_get_drvdata(pci_dev);
2151        u32 tmp;
2152
2153        pci_set_power_state(pci_dev, PCI_D0);
2154        pci_restore_state(pci_dev);
2155
2156        /* Re-initialize DM910X board */
2157        dmfe_init_dm910x(dev);
2158
2159        /* Disable WOL */
2160        pci_read_config_dword(pci_dev, 0x40, &tmp);
2161
2162        tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2163        pci_write_config_dword(pci_dev, 0x40, tmp);
2164
2165        pci_enable_wake(pci_dev, PCI_D3hot, 0);
2166        pci_enable_wake(pci_dev, PCI_D3cold, 0);
2167
2168        /* Restart upper layer interface */
2169        netif_device_attach(dev);
2170
2171        return 0;
2172}
2173#else
2174#define dmfe_suspend NULL
2175#define dmfe_resume NULL
2176#endif
2177
2178static struct pci_driver dmfe_driver = {
2179        .name           = "dmfe",
2180        .id_table       = dmfe_pci_tbl,
2181        .probe          = dmfe_init_one,
2182        .remove         = dmfe_remove_one,
2183        .suspend        = dmfe_suspend,
2184        .resume         = dmfe_resume
2185};
2186
2187MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2188MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2189MODULE_LICENSE("GPL");
2190MODULE_VERSION(DRV_VERSION);
2191
2192module_param(debug, int, 0);
2193module_param(mode, byte, 0);
2194module_param(cr6set, int, 0);
2195module_param(chkmode, byte, 0);
2196module_param(HPNA_mode, byte, 0);
2197module_param(HPNA_rx_cmd, byte, 0);
2198module_param(HPNA_tx_cmd, byte, 0);
2199module_param(HPNA_NoiseFloor, byte, 0);
2200module_param(SF_mode, byte, 0);
2201MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2202MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2203                "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2204
2205MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2206                "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2207
2208/*      Description:
2209 *      When the user loads the module with insmod, the kernel invokes
2210 *      this init routine to initialize and register the driver.
2211 */
2212
2213static int __init dmfe_init_module(void)
2214{
2215        int rc;
2216
2217        pr_info("%s\n", version);
2218        printed_version = 1;
2219
2220        DMFE_DBUG(0, "init_module() ", debug);
2221
2222        if (debug)
2223                dmfe_debug = debug;     /* set debug flag */
2224        if (cr6set)
2225                dmfe_cr6_user_set = cr6set;
2226
2227        switch(mode) {
2228        case DMFE_10MHF:
2229        case DMFE_100MHF:
2230        case DMFE_10MFD:
2231        case DMFE_100MFD:
2232        case DMFE_1M_HPNA:
2233                dmfe_media_mode = mode;
2234                break;
2235        default:dmfe_media_mode = DMFE_AUTO;
2236                break;
2237        }
2238
2239        if (HPNA_mode > 4)
2240                HPNA_mode = 0;          /* Default: LP/HS */
2241        if (HPNA_rx_cmd > 1)
2242                HPNA_rx_cmd = 0;        /* Default: Ignored remote cmd */
2243        if (HPNA_tx_cmd > 1)
2244                HPNA_tx_cmd = 0;        /* Default: Don't issue remote cmd */
2245        if (HPNA_NoiseFloor > 15)
2246                HPNA_NoiseFloor = 0;
2247
2248        rc = pci_register_driver(&dmfe_driver);
2249        if (rc < 0)
2250                return rc;
2251
2252        return 0;
2253}
2254
2255
2256/*
2257 *      Description:
2258 *      When the user removes the module with rmmod, the kernel invokes
2259 *      this cleanup routine to unregister all registered services.
2260 */
2261
2262static void __exit dmfe_cleanup_module(void)
2263{
2264        DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2265        pci_unregister_driver(&dmfe_driver);
2266}
2267
2268module_init(dmfe_init_module);
2269module_exit(dmfe_cleanup_module);
2270