linux/drivers/net/ethernet/dec/tulip/dmfe.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
    ethernet driver for Linux.
    Copyright (C) 1997  Sten Wang


    DAVICOM Web-Site: www.davicom.com.tw

    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>

    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.

    Marcelo Tosatti <marcelo@conectiva.com.br> :
    Made it compile in 2.3 (device to net_device)

    Alan Cox <alan@lxorguk.ukuu.org.uk> :
    Cleaned up for kernel merge.
    Removed the back compatibility support
    Reformatted, fixing spelling etc as I went
    Removed IRQ 0-15 assumption

    Jeff Garzik <jgarzik@pobox.com> :
    Updated to use new PCI driver API.
    Resource usage cleanups.
    Report driver version to user.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
    Andrew Morton and Frank Davis for the SMP safety fixes.

    Vojtech Pavlik <vojtech@suse.cz> :
    Cleaned up pointer arithmetics.
    Fixed a lot of 64bit issues.
    Cleaned up printk()s a bit.
    Fixed some obvious big endian problems.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
    Use time_after for jiffies calculation.  Added ethtool
    support.  Updated PCI resource allocation.  Do not
    forget to unmap PCI mapped skbs.

    Alan Cox <alan@lxorguk.ukuu.org.uk>
    Added new PCI identifiers provided by Clear Zhang at ALi
    for their 1563 ethernet device.

    TODO

    Check on 64 bit boxes.
    Check and fix on big endian boxes.

    Test and make sure PCI latency is now correct for all cases.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "dmfe"
#define DRV_VERSION     "1.36.4"
#define DRV_RELDATE     "2002-01-17"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_TULIP_DM910X
#include <linux/of.h>
#endif


/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */

#define DM9102_IO_SIZE  0x80
#define DM9102A_IO_SIZE 0x100
#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)      /* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)      /* TX wakeup count */
#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC    0x600
#define RX_ALLOC_SIZE   0x620
#define DM910X_RESET    1
#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
#define CR6_DEFAULT     0x00080000      /* HD */
#define CR7_DEFAULT     0x180c1
#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE 1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE    100
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

#define DMFE_WOL_LINKCHANGE     0x20000000
#define DMFE_WOL_SAMPLEPACKET   0x10000000
#define DMFE_WOL_MAGICPACKET    0x08000000


#define DMFE_10MHF      0
#define DMFE_100MHF     1
#define DMFE_10MFD      4
#define DMFE_100MFD     5
#define DMFE_AUTO       8
#define DMFE_1M_HPNA    0x10

#define DMFE_TXTH_72    0x400000        /* TX TH 72 byte */
#define DMFE_TXTH_96    0x404000        /* TX TH 96 byte */
#define DMFE_TXTH_128   0x0000          /* TX TH 128 byte */
#define DMFE_TXTH_256   0x4000          /* TX TH 256 byte */
#define DMFE_TXTH_512   0x8000          /* TX TH 512 byte */
#define DMFE_TXTH_1K    0xC000          /* TX TH 1K  byte */

#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK    (HZ/2)  /* tx packet kick-out time 0.5 s */

#define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
#define dr32(reg)       ioread32(ioaddr + (reg))
#define dr16(reg)       ioread16(ioaddr + (reg))
#define dr8(reg)        ioread8(ioaddr + (reg))

#define DMFE_DBUG(dbug_now, msg, value)                 \
        do {                                            \
                if (dmfe_debug || (dbug_now))           \
                        pr_err("%s %lx\n",              \
                               (msg), (long) (value));  \
        } while (0)

#define SHOW_MEDIA_TYPE(mode)                           \
        pr_info("Change Speed to %sMbps %s duplex\n",   \
                (mode & 1) ? "100":"10",                \
                (mode & 4) ? "full":"half");


/* CR9 definition: SROM/MII */
#define CR9_SROM_READ   0x4800
#define CR9_SRCS        0x1
#define CR9_SRCLK       0x2
#define CR9_CRDOUT      0x8
#define SROM_DATA_0     0x0
#define SROM_DATA_1     0x4
#define PHY_DATA_1      0x20000
#define PHY_DATA_0      0x00000
#define MDCLKH          0x10000

#define PHY_POWER_DOWN  0x800

#define SROM_V41_CODE   0x14

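/* The DM9132 and DM9102A (chip revision >= 0x30) are expected to decode a
   0x100-byte I/O window; older DM9100/DM9102 parts only decode 0x80 bytes.
   CHK_IO_SIZE() rebuilds the (device << 16 | vendor) value used by the
   PCI_DM9xxx_ID constants above. */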
#define __CHK_IO_SIZE(pci_id, dev_rev) \
 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
        DM9102A_IO_SIZE: DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
        (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
        (pci_dev)->revision))

/* Structure/enum declaration ------------------------------- */
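/* Tx/Rx descriptors use the Tulip-style layout: the four little-endian words
   (tdes0-3 / rdes0-3) are shared with the chip, while the trailing pointer
   fields are host-only bookkeeping used to walk the software ring. */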
struct tx_desc {
        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us */
        struct tx_desc *next_tx_desc;
} __attribute__(( aligned(32) ));

struct rx_desc {
        __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
        struct sk_buff *rx_skb_ptr;     /* Data for us */
        struct rx_desc *next_rx_desc;
} __attribute__(( aligned(32) ));

struct dmfe_board_info {
        u32 chip_id;                    /* Chip vendor/Device ID */
        u8 chip_revision;               /* Chip revision */
        struct net_device *next_dev;    /* next device */
        struct pci_dev *pdev;           /* PCI device */
        spinlock_t lock;

        void __iomem *ioaddr;           /* I/O base address */
        u32 cr0_data;
        u32 cr5_data;
        u32 cr6_data;
        u32 cr7_data;
        u32 cr15_data;

        /* pointer for memory physical address */
        dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
        dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
        dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
        dma_addr_t first_tx_desc_dma;
        dma_addr_t first_rx_desc_dma;

        /* descriptor pointer */
        unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
        unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
        unsigned char *desc_pool_ptr;   /* descriptor pool memory */
        struct tx_desc *first_tx_desc;
        struct tx_desc *tx_insert_ptr;
        struct tx_desc *tx_remove_ptr;
        struct rx_desc *first_rx_desc;
        struct rx_desc *rx_insert_ptr;
        struct rx_desc *rx_ready_ptr;   /* packet come pointer */
        unsigned long tx_packet_cnt;    /* transmitted packet count */
        unsigned long tx_queue_cnt;     /* wait to send packet count */
        unsigned long rx_avail_cnt;     /* available rx descriptor count */
        unsigned long interval_rx_cnt;  /* rx packet count a callback time */

        u16 HPNA_command;               /* For HPNA register 16 */
        u16 HPNA_timer;                 /* For HPNA remote device check */
        u16 dbug_cnt;
        u16 NIC_capability;             /* NIC media capability */
        u16 PHY_reg4;                   /* Saved Phyxcer register 4 value */

        u8 HPNA_present;                /* 0:none, 1:DM9801, 2:DM9802 */
        u8 chip_type;                   /* Keep DM9102A chip type */
        u8 media_mode;                  /* user specify media mode */
        u8 op_mode;                     /* real work media mode */
        u8 phy_addr;
        u8 wait_reset;                  /* Hardware failed, need to reset */
        u8 dm910x_chk_mode;             /* Operating mode check */
        u8 first_in_callback;           /* Flag to record state */
        u8 wol_mode;                    /* user WOL settings */
        struct timer_list timer;

        /* Driver defined statistic counter */
        unsigned long tx_fifo_underrun;
        unsigned long tx_loss_carrier;
        unsigned long tx_no_carrier;
        unsigned long tx_late_collision;
        unsigned long tx_excessive_collision;
        unsigned long tx_jabber_timeout;
        unsigned long reset_count;
        unsigned long reset_cr8;
        unsigned long reset_fatal;
        unsigned long reset_TXtimeout;

        /* NIC SROM data */
        unsigned char srom[128];
};

enum dmfe_offsets {
        DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
        DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
        DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
        DCR15 = 0x78
};

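/* CR6 (operation mode) bits as used by this driver: RXSC/TXSC start the
   Rx/Tx state machines, PM = promiscuous, PAM = pass all multicast,
   PBF = pass bad frames, FDM = full duplex, SFT = store and forward. */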
enum dmfe_CR6_bits {
        CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
        CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
        CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

/* Global variable declaration ----------------------------- */
static int printed_version;
static const char version[] =
        "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;            /* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;          /* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;          /* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;      /* Default: HPNA NoiseFloor */
static u8 SF_mode;              /* Special Function: 1:VLAN, 2:RX Flow Control
                                   4: TX pause packet */


/* function declaration ------------------------------------- */
static int dmfe_open(struct net_device *);
static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
static int dmfe_stop(struct net_device *);
static void dmfe_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct net_device *);
static void dm9132_id_table(struct net_device *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(struct timer_list *);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct net_device *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct net_device *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);

/* DM910X network board routine ---------------------------- */

static const struct net_device_ops netdev_ops = {
        .ndo_open               = dmfe_open,
        .ndo_stop               = dmfe_stop,
        .ndo_start_xmit         = dmfe_start_xmit,
        .ndo_set_rx_mode        = dmfe_set_filter_mode,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_dmfe,
#endif
};

/*
 *      Search DM910X board, allocate space and register it
 */

static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct dmfe_board_info *db;     /* board information structure */
        struct net_device *dev;
        u32 pci_pmr;
        int i, err;

        DMFE_DBUG(0, "dmfe_init_one()", 0);

        if (!printed_version++)
                pr_info("%s\n", version);

        /*
         *      SPARC on-board DM910x chips should be handled by the main
         *      tulip driver, except for early DM9100s.
         */
#ifdef CONFIG_TULIP_DM910X
        if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
            ent->driver_data == PCI_DM9102_ID) {
                struct device_node *dp = pci_device_to_OF_node(pdev);

                if (dp && of_get_property(dp, "local-mac-address", NULL)) {
                        pr_info("skipping on-board DM910x (use tulip)\n");
                        return -ENODEV;
                }
        }
#endif

        /* Init network device */
        dev = alloc_etherdev(sizeof(*db));
        if (dev == NULL)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                pr_warn("32-bit PCI DMA not available\n");
                err = -ENODEV;
                goto err_out_free;
        }

        /* Enable Master/IO access, Disable memory access */
        err = pci_enable_device(pdev);
        if (err)
                goto err_out_free;

        if (!pci_resource_start(pdev, 0)) {
                pr_err("I/O base is zero\n");
                err = -ENODEV;
                goto err_out_disable;
        }

        if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
                pr_err("Allocated I/O size too small\n");
                err = -ENODEV;
                goto err_out_disable;
        }

#if 0   /* pci_{enable_device,set_master} sets minimum latency for us now */

        /* Set Latency Timer 80h */
        /* FIXME: setting values > 32 breaks some SiS 559x stuff.
           Need a PCI quirk.. */

        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

        if (pci_request_regions(pdev, DRV_NAME)) {
                pr_err("Failed to request PCI regions\n");
                err = -ENODEV;
                goto err_out_disable;
        }

        /* Init system & device */
        db = netdev_priv(dev);

        /* Allocate Tx/Rx descriptor memory */
        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
                        DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
        if (!db->desc_pool_ptr) {
                err = -ENOMEM;
                goto err_out_res;
        }

        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
                        TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
        if (!db->buf_pool_ptr) {
                err = -ENOMEM;
                goto err_out_free_desc;
        }

        db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
        db->first_tx_desc_dma = db->desc_pool_dma_ptr;
        db->buf_pool_start = db->buf_pool_ptr;
        db->buf_pool_dma_start = db->buf_pool_dma_ptr;

        db->chip_id = ent->driver_data;
        /* IO type range. */
        db->ioaddr = pci_iomap(pdev, 0, 0);
        if (!db->ioaddr) {
                err = -ENOMEM;
                goto err_out_free_buf;
        }

        db->chip_revision = pdev->revision;
        db->wol_mode = 0;

        db->pdev = pdev;

        pci_set_drvdata(pdev, dev);
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        netif_carrier_off(dev);
        spin_lock_init(&db->lock);

        pci_read_config_dword(pdev, 0x50, &pci_pmr);
        pci_pmr &= 0x70000;
        if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
                db->chip_type = 1;      /* DM9102A E3 */
        else
                db->chip_type = 0;

        /* read 64 word srom data */
        for (i = 0; i < 64; i++) {
                ((__le16 *) db->srom)[i] =
                        cpu_to_le16(read_srom_word(db->ioaddr, i));
        }

        /* Set Node address */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = db->srom[20 + i];

        err = register_netdev (dev);
        if (err)
                goto err_out_unmap;

        dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
                 ent->driver_data >> 16,
                 pci_name(pdev), dev->dev_addr, pdev->irq);

        pci_set_master(pdev);

        return 0;

err_out_unmap:
        pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
        pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                            db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
        pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
                            db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
        pci_release_regions(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out_free:
        free_netdev(dev);

        return err;
}


static void dmfe_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct dmfe_board_info *db = netdev_priv(dev);

        DMFE_DBUG(0, "dmfe_remove_one()", 0);

        if (dev) {

                unregister_netdev(dev);
                pci_iounmap(db->pdev, db->ioaddr);
                pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
                                        DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
                                        db->desc_pool_dma_ptr);
                pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                                        db->buf_pool_ptr, db->buf_pool_dma_ptr);
                pci_release_regions(pdev);
                free_netdev(dev);       /* free board information */
        }

        DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}


/*
 *      Open the interface.
 *      The interface is opened whenever "ifconfig" activates it.
 */

static int dmfe_open(struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        const int irq = db->pdev->irq;
        int ret;

        DMFE_DBUG(0, "dmfe_open", 0);

        ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
        if (ret)
                return ret;

        /* system variable init */
        db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
        db->tx_packet_cnt = 0;
        db->tx_queue_cnt = 0;
        db->rx_avail_cnt = 0;
        db->wait_reset = 0;

        db->first_in_callback = 0;
        db->NIC_capability = 0xf;       /* All capability*/
        db->PHY_reg4 = 0x1e0;

        /* CR6 operation mode decision */
        if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
                (db->chip_revision >= 0x30) ) {
                db->cr6_data |= DMFE_TXTH_256;
                db->cr0_data = CR0_DEFAULT;
                db->dm910x_chk_mode=4;          /* Enter the normal mode */
        } else {
                db->cr6_data |= CR6_SFT;        /* Store & Forward mode */
                db->cr0_data = 0;
                db->dm910x_chk_mode = 1;        /* Enter the check mode */
        }

        /* Initialize DM910X board */
        dmfe_init_dm910x(dev);

        /* Activate the system interface */
        netif_wake_queue(dev);

        /* set and activate a timer process */
        timer_setup(&db->timer, dmfe_timer, 0);
        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
        add_timer(&db->timer);

        return 0;
}


/*      Initialize DM910X board
 *      Reset DM910X board
 *      Initialize TX/Rx descriptor chain structure
 *      Send the set-up frame
 *      Enable Tx/Rx machine
 */

static void dmfe_init_dm910x(struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;

        DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

        /* Reset DM910x MAC controller */
        dw32(DCR0, DM910X_RESET);       /* RESET MAC */
        udelay(100);
        dw32(DCR0, db->cr0_data);
        udelay(5);

        /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
        db->phy_addr = 1;

        /* Parse SROM and media mode */
        dmfe_parse_srom(db);
        db->media_mode = dmfe_media_mode;

        /* RESET Phyxcer Chip by GPR port bit 7 */
        dw32(DCR12, 0x180);             /* Let bit 7 output port */
        if (db->chip_id == PCI_DM9009_ID) {
                dw32(DCR12, 0x80);      /* Issue RESET signal */
                mdelay(300);                    /* Delay 300 ms */
        }
        dw32(DCR12, 0x0);       /* Clear RESET signal */

        /* Process Phyxcer Media Mode */
        if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
                dmfe_set_phyxcer(db);

        /* Media Mode Process */
        if ( !(db->media_mode & DMFE_AUTO) )
                db->op_mode = db->media_mode;   /* Force Mode */

        /* Initialize Transmit/Receive descriptor and CR3/4 */
        dmfe_descriptor_init(dev);

        /* Init CR6 to program DM910x operation */
        update_cr6(db->cr6_data, ioaddr);

        /* Send setup frame */
        if (db->chip_id == PCI_DM9132_ID)
                dm9132_id_table(dev);   /* DM9132 */
        else
                send_filter_frame(dev); /* DM9102/DM9102A */

        /* Init CR7, interrupt active bit */
        db->cr7_data = CR7_DEFAULT;
        dw32(DCR7, db->cr7_data);

        /* Init CR15, Tx jabber and Rx watchdog timer */
        dw32(DCR15, db->cr15_data);

        /* Enable DM910X Tx/Rx function */
        db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
        update_cr6(db->cr6_data, ioaddr);
}


/*
 *      Hardware start transmission.
 *      Send a packet to media from the upper layer.
 */

static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
        struct tx_desc *txptr;
        unsigned long flags;

        DMFE_DBUG(0, "dmfe_start_xmit", 0);

        /* Too large packet check */
        if (skb->len > MAX_PACKET_SIZE) {
                pr_err("big packet = %d\n", (u16)skb->len);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Resource flag check */
        netif_stop_queue(dev);

        spin_lock_irqsave(&db->lock, flags);

        /* No Tx resource check; this should never happen normally */
        if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
                spin_unlock_irqrestore(&db->lock, flags);
                pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
                return NETDEV_TX_BUSY;
        }

        /* Disable NIC interrupt */
        dw32(DCR7, 0);

        /* transmit this packet */
        txptr = db->tx_insert_ptr;
        skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
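        /* TDES1 0xe1000000: interrupt on completion, last/first segment,
           second-address chained; low bits carry the buffer length */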
        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

        /* Point to next transmit free descriptor */
        db->tx_insert_ptr = txptr->next_tx_desc;

        /* Transmit Packet Process */
        if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
                dw32(DCR1, 0x1);                        /* Issue Tx polling */
                netif_trans_update(dev);                /* saved time stamp */
        } else {
                db->tx_queue_cnt++;                     /* queue TX packet */
                dw32(DCR1, 0x1);                        /* Issue Tx polling */
        }

        /* Tx resource check */
        if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
                netif_wake_queue(dev);

        /* Restore CR7 to enable interrupt */
        spin_unlock_irqrestore(&db->lock, flags);
        dw32(DCR7, db->cr7_data);

        /* free this SKB */
        dev_consume_skb_any(skb);

        return NETDEV_TX_OK;
}


/*
 *      Stop the interface.
 *      The interface is stopped when it is brought down.
 */

static int dmfe_stop(struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;

        DMFE_DBUG(0, "dmfe_stop", 0);

        /* disable system */
        netif_stop_queue(dev);

        /* delete timer */
        del_timer_sync(&db->timer);

        /* Reset & stop DM910X board */
        dw32(DCR0, DM910X_RESET);
        udelay(100);
        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

        /* free interrupt */
        free_irq(db->pdev->irq, dev);

        /* free allocated rx buffer */
        dmfe_free_rxbuffer(db);

#if 0
        /* show statistic counter */
        printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
               db->tx_fifo_underrun, db->tx_excessive_collision,
               db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
               db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
               db->reset_fatal, db->reset_TXtimeout);
#endif

        return 0;
}


/*
 *      DM9102 interrupt handler
 *      receive the packet to upper layer, free the transmitted packet
 */

static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
        unsigned long flags;

        DMFE_DBUG(0, "dmfe_interrupt()", 0);

        spin_lock_irqsave(&db->lock, flags);

        /* Got DM910X status */
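        /* Reading DCR5 latches the current interrupt status; writing the same
           value back acknowledges (clears) the bits that were set. */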
        db->cr5_data = dr32(DCR5);
        dw32(DCR5, db->cr5_data);
        if ( !(db->cr5_data & 0xc1) ) {
                spin_unlock_irqrestore(&db->lock, flags);
                return IRQ_HANDLED;
        }

        /* Disable all interrupt in CR7 to solve the interrupt edge problem */
        dw32(DCR7, 0);

        /* Check system status */
        if (db->cr5_data & 0x2000) {
                /* system bus error happen */
                DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
                db->reset_fatal++;
                db->wait_reset = 1;     /* Need to RESET */
                spin_unlock_irqrestore(&db->lock, flags);
                return IRQ_HANDLED;
        }

        /* Receive the incoming packet */
        if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
                dmfe_rx_packet(dev, db);

        /* reallocate rx descriptor buffer */
        if (db->rx_avail_cnt<RX_DESC_CNT)
                allocate_rx_buffer(dev);

        /* Free the transmitted descriptor */
        if ( db->cr5_data & 0x01)
                dmfe_free_tx_pkt(dev, db);

        /* Mode Check */
        if (db->dm910x_chk_mode & 0x2) {
                db->dm910x_chk_mode = 0x4;
                db->cr6_data |= 0x100;
                update_cr6(db->cr6_data, ioaddr);
        }

        /* Restore CR7 to enable interrupt mask */
        dw32(DCR7, db->cr7_data);

        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_dmfe (struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        const int irq = db->pdev->irq;

        /* disable_irq here is not very nice, but with the lockless
           interrupt handler we have no other choice. */
        disable_irq(irq);
        dmfe_interrupt (irq, dev);
        enable_irq(irq);
}
#endif

/*
 *      Free TX resource after TX complete
 */

static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
        struct tx_desc *txptr;
        void __iomem *ioaddr = db->ioaddr;
        u32 tdes0;

        txptr = db->tx_remove_ptr;
        while(db->tx_packet_cnt) {
                tdes0 = le32_to_cpu(txptr->tdes0);
                if (tdes0 & 0x80000000)
                        break;

                /* A packet sent completed */
                db->tx_packet_cnt--;
                dev->stats.tx_packets++;

                /* Transmit statistic counter */
                if ( tdes0 != 0x7fffffff ) {
                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
                        if (tdes0 & TDES0_ERR_MASK) {
                                dev->stats.tx_errors++;

                                if (tdes0 & 0x0002) {   /* UnderRun */
                                        db->tx_fifo_underrun++;
                                        if ( !(db->cr6_data & CR6_SFT) ) {
                                                db->cr6_data = db->cr6_data | CR6_SFT;
                                                update_cr6(db->cr6_data, ioaddr);
                                        }
                                }
                                if (tdes0 & 0x0100)
                                        db->tx_excessive_collision++;
                                if (tdes0 & 0x0200)
                                        db->tx_late_collision++;
                                if (tdes0 & 0x0400)
                                        db->tx_no_carrier++;
                                if (tdes0 & 0x0800)
                                        db->tx_loss_carrier++;
                                if (tdes0 & 0x4000)
                                        db->tx_jabber_timeout++;
                        }
                }

                txptr = txptr->next_tx_desc;
        }/* End of while */

        /* Update TX remove pointer to next */
        db->tx_remove_ptr = txptr;

        /* Send the Tx packet in queue */
        if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
                db->tx_queue_cnt--;
                dw32(DCR1, 0x1);                        /* Issue Tx polling */
                netif_trans_update(dev);                /* saved time stamp */
        }

        /* Resource available check */
        if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
                netif_wake_queue(dev);  /* Active upper layer, send again */
}


/*
 *      Calculate the CRC value of the Rx packet
 *      flag =  1 : return the reverse CRC (for the received packet CRC)
 *              0 : return the normal CRC (for Hash Table index)
 */

static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
        u32 crc = crc32(~0, Data, Len);
        if (flag) crc = ~crc;
        return crc;
}


/*
 *      Receive the incoming packet and pass it to the upper layer
 */

static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
        struct rx_desc *rxptr;
        struct sk_buff *skb, *newskb;
        int rxlen;
        u32 rdes0;

        rxptr = db->rx_ready_ptr;

        while(db->rx_avail_cnt) {
                rdes0 = le32_to_cpu(rxptr->rdes0);
                if (rdes0 & 0x80000000) /* packet owner check */
                        break;

                db->rx_avail_cnt--;
                db->interval_rx_cnt++;

                pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
                                 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

                if ( (rdes0 & 0x300) != 0x300) {
                        /* A packet without First/Last flag */
                        /* reuse this SKB */
                        DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                } else {
                        /* A packet with First/Last flag */
                        rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

                        /* error summary bit check */
                        if (rdes0 & 0x8000) {
                                /* This is an error packet */
                                dev->stats.rx_errors++;
                                if (rdes0 & 1)
                                        dev->stats.rx_fifo_errors++;
                                if (rdes0 & 2)
                                        dev->stats.rx_crc_errors++;
                                if (rdes0 & 0x80)
                                        dev->stats.rx_length_errors++;
                        }

                        if ( !(rdes0 & 0x8000) ||
                                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
                                skb = rxptr->rx_skb_ptr;

                                /* Received Packet CRC check need or not */
                                if ( (db->dm910x_chk_mode & 1) &&
                                        (cal_CRC(skb->data, rxlen, 1) !=
                                        (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
                                        /* Found an erroneous received packet */
                                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                                        db->dm910x_chk_mode = 3;
                                } else {
                                        /* Good packet, send to upper layer */
                                        /* Short packet uses a new SKB */
                                        if ((rxlen < RX_COPY_SIZE) &&
                                                ((newskb = netdev_alloc_skb(dev, rxlen + 2))
                                                != NULL)) {

                                                skb = newskb;
                                                /* size less than COPY_SIZE, allocate a rxlen SKB */
                                                skb_reserve(skb, 2); /* 16byte align */
                                                skb_copy_from_linear_data(rxptr->rx_skb_ptr,
                                                          skb_put(skb, rxlen),
                                                                          rxlen);
                                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                                        } else
                                                skb_put(skb, rxlen);

                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += rxlen;
                                }
                        } else {
                                /* Reuse SKB buffer when the packet is error */
                                DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                        }
                }

                rxptr = rxptr->next_rx_desc;
        }

        db->rx_ready_ptr = rxptr;
}

/*
 * Set DM910X multicast address
 */

static void dmfe_set_filter_mode(struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        unsigned long flags;
        int mc_count = netdev_mc_count(dev);

        DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
        spin_lock_irqsave(&db->lock, flags);

        if (dev->flags & IFF_PROMISC) {
                DMFE_DBUG(0, "Enable PROM Mode", 0);
                db->cr6_data |= CR6_PM | CR6_PBF;
                update_cr6(db->cr6_data, db->ioaddr);
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
                DMFE_DBUG(0, "Pass all multicast address", mc_count);
                db->cr6_data &= ~(CR6_PM | CR6_PBF);
                db->cr6_data |= CR6_PAM;
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        DMFE_DBUG(0, "Set multicast address", mc_count);
        if (db->chip_id == PCI_DM9132_ID)
                dm9132_id_table(dev);   /* DM9132 */
        else
                send_filter_frame(dev); /* DM9102/DM9102A */
        spin_unlock_irqrestore(&db->lock, flags);
}

/*
 *      Ethtool interface
 */

static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        struct dmfe_board_info *np = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int dmfe_ethtool_set_wol(struct net_device *dev,
                                struct ethtool_wolinfo *wolinfo)
{
        struct dmfe_board_info *db = netdev_priv(dev);

        if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
                                WAKE_ARP | WAKE_MAGICSECURE))
                   return -EOPNOTSUPP;

        db->wol_mode = wolinfo->wolopts;
        return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
                                 struct ethtool_wolinfo *wolinfo)
{
        struct dmfe_board_info *db = netdev_priv(dev);

        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
        wolinfo->wolopts = db->wol_mode;
}


static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = dmfe_ethtool_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .set_wol                = dmfe_ethtool_set_wol,
        .get_wol                = dmfe_ethtool_get_wol,
};

/*
 *      A periodic timer routine
 *      Dynamic media sense, allocate Rx buffer...
 */

static void dmfe_timer(struct timer_list *t)
{
        struct dmfe_board_info *db = from_timer(db, t, timer);
        struct net_device *dev = pci_get_drvdata(db->pdev);
        void __iomem *ioaddr = db->ioaddr;
        u32 tmp_cr8;
        unsigned char tmp_cr12;
        unsigned long flags;

        int link_ok, link_ok_phy;

        DMFE_DBUG(0, "dmfe_timer()", 0);
        spin_lock_irqsave(&db->lock, flags);

        /* Media mode process when Link OK before entering this routine */
        if (db->first_in_callback == 0) {
                db->first_in_callback = 1;
                if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
                        db->cr6_data &= ~0x40000;
                        update_cr6(db->cr6_data, ioaddr);
                        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
                        db->cr6_data |= 0x40000;
                        update_cr6(db->cr6_data, ioaddr);
                        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
                        add_timer(&db->timer);
                        spin_unlock_irqrestore(&db->lock, flags);
                        return;
                }
        }


        /* Operating Mode Check */
        if ( (db->dm910x_chk_mode & 0x1) &&
                (dev->stats.rx_packets > MAX_CHECK_PACKET) )
                db->dm910x_chk_mode = 0x4;

        /* Dynamic reset DM910X : system error or transmit time-out */
        tmp_cr8 = dr32(DCR8);
        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
                db->reset_cr8++;
                db->wait_reset = 1;
        }
        db->interval_rx_cnt = 0;

        /* TX polling kick monitor */
        if ( db->tx_packet_cnt &&
             time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
                dw32(DCR1, 0x1);   /* Tx polling again */

                /* TX Timeout */
                if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
                        db->reset_TXtimeout++;
                        db->wait_reset = 1;
                        dev_warn(&dev->dev, "Tx timeout - resetting\n");
                }
        }

        if (db->wait_reset) {
                DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
                db->reset_count++;
                dmfe_dynamic_reset(dev);
                db->first_in_callback = 0;
                db->timer.expires = DMFE_TIMER_WUT;
                add_timer(&db->timer);
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        /* Link status check, Dynamic media type change */
        if (db->chip_id == PCI_DM9132_ID)
                tmp_cr12 = dr8(DCR9 + 3);       /* DM9132 */
        else
                tmp_cr12 = dr8(DCR12);          /* DM9102/DM9102A */

        if ( ((db->chip_id == PCI_DM9102_ID) &&
                (db->chip_revision == 0x30)) ||
                ((db->chip_id == PCI_DM9132_ID) &&
                (db->chip_revision == 0x10)) ) {
                /* DM9102A Chip */
                if (tmp_cr12 & 2)
                        link_ok = 0;
                else
                        link_ok = 1;
        }
        else
                /*0x43 is used instead of 0x3 because bit 6 should represent
                        link status of external PHY */
                link_ok = (tmp_cr12 & 0x43) ? 1 : 0;


        /* If chip reports that link is failed it could be because external
                PHY link status pin is not connected correctly to chip
                To be sure ask PHY too.
        */

        /* need a dummy read because of PHY's register latch*/
        dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
        link_ok_phy = (dmfe_phy_read (db->ioaddr,
                                      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

        if (link_ok_phy != link_ok) {
                DMFE_DBUG (0, "PHY and chip report different link status", 0);
                link_ok = link_ok | link_ok_phy;
        }

        if ( !link_ok && netif_carrier_ok(dev)) {
                /* Link Failed */
                DMFE_DBUG(0, "Link Failed", tmp_cr12);
                netif_carrier_off(dev);

                /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
                /* AUTO or force 1M Homerun/Longrun don't need */
                if ( !(db->media_mode & 0x38) )
                        dmfe_phy_write(db->ioaddr, db->phy_addr,
                                       0, 0x1000, db->chip_id);

                /* AUTO mode, if INT phyxcer link failed, select EXT device */
                if (db->media_mode & DMFE_AUTO) {
                        /* 10/100M link failed, use 1M Home-Net */
                        db->cr6_data|=0x00040000;       /* bit18=1, MII */
                        db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
                        update_cr6(db->cr6_data, ioaddr);
                }
        } else if (!netif_carrier_ok(dev)) {

                DMFE_DBUG(0, "Link OK", tmp_cr12);

                /* Auto Sense Speed */
                if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
                        netif_carrier_on(dev);
                        SHOW_MEDIA_TYPE(db->op_mode);
                }

                dmfe_process_mode(db);
        }

        /* HPNA remote command check */
        if (db->HPNA_command & 0xf00) {
                db->HPNA_timer--;
                if (!db->HPNA_timer)
                        dmfe_HPNA_remote_cmd_chk(db);
        }

        /* Timer active again */
        db->timer.expires = DMFE_TIMER_WUT;
        add_timer(&db->timer);
        spin_unlock_irqrestore(&db->lock, flags);
}


/*
 *      Dynamic reset the DM910X board
 *      Stop DM910X board
 *      Free Tx/Rx allocated memory
 *      Reset DM910X board
 *      Re-initialize DM910X board
 */

static void dmfe_dynamic_reset(struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;

        DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

        /* Stop MAC controller */
        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
        update_cr6(db->cr6_data, ioaddr);
        dw32(DCR7, 0);                          /* Disable Interrupt */
        dw32(DCR5, dr32(DCR5));

        /* Disable upper layer interface */
        netif_stop_queue(dev);

        /* Free Rx Allocate buffer */
        dmfe_free_rxbuffer(db);

        /* system variable init */
        db->tx_packet_cnt = 0;
        db->tx_queue_cnt = 0;
        db->rx_avail_cnt = 0;
        netif_carrier_off(dev);
        db->wait_reset = 0;

        /* Re-initialize DM910X board */
        dmfe_init_dm910x(dev);

        /* Restart upper layer interface */
        netif_wake_queue(dev);
}


/*
 *      free all allocated rx buffer
 */

static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
{
        DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

        /* free allocated rx buffer */
        while (db->rx_avail_cnt) {
                dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
                db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
                db->rx_avail_cnt--;
        }
}


/*
 *      Reuse the SK buffer
 */

static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
        struct rx_desc *rxptr = db->rx_insert_ptr;

        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
                rxptr->rx_skb_ptr = skb;
                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
                            skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
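                /* Ensure the buffer address in rdes2 is visible before handing
                   ownership of the descriptor back to the chip */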
                wmb();
                rxptr->rdes0 = cpu_to_le32(0x80000000);
                db->rx_avail_cnt++;
                db->rx_insert_ptr = rxptr->next_rx_desc;
        } else
                DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}


/*
 *      Initialize transmit/Receive descriptor
 *      Using Chain structure, and allocate Tx/Rx buffer
 */

static void dmfe_descriptor_init(struct net_device *dev)
{
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
        struct tx_desc *tmp_tx;
        struct rx_desc *tmp_rx;
        unsigned char *tmp_buf;
        dma_addr_t tmp_tx_dma, tmp_rx_dma;
        dma_addr_t tmp_buf_dma;
        int i;

        DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

        /* tx descriptor start pointer */
        db->tx_insert_ptr = db->first_tx_desc;
        db->tx_remove_ptr = db->first_tx_desc;
        dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */

        /* rx descriptor start pointer */
        db->first_rx_desc = (void *)db->first_tx_desc +
                        sizeof(struct tx_desc) * TX_DESC_CNT;

        db->first_rx_desc_dma =  db->first_tx_desc_dma +
                        sizeof(struct tx_desc) * TX_DESC_CNT;
        db->rx_insert_ptr = db->first_rx_desc;
        db->rx_ready_ptr = db->first_rx_desc;
        dw32(DCR3, db->first_rx_desc_dma);              /* RX DESC address */

        /* Init Transmit chain */
        tmp_buf = db->buf_pool_start;
        tmp_buf_dma = db->buf_pool_dma_start;
        tmp_tx_dma = db->first_tx_desc_dma;
        for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
                tmp_tx->tx_buf_ptr = tmp_buf;
                tmp_tx->tdes0 = cpu_to_le32(0);
                tmp_tx->tdes1 = cpu_to_le32(0x81000000);        /* IC, chain */
                tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
                tmp_tx_dma += sizeof(struct tx_desc);
                tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
                tmp_tx->next_tx_desc = tmp_tx + 1;
                tmp_buf = tmp_buf + TX_BUF_ALLOC;
                tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
        }
        (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
        tmp_tx->next_tx_desc = db->first_tx_desc;

        /* Init Receive descriptor chain */
        tmp_rx_dma=db->first_rx_desc_dma;
        for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
                tmp_rx->rdes0 = cpu_to_le32(0);
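                /* RDES1: second-address chained + buffer 1 size (0x600 bytes) */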
                tmp_rx->rdes1 = cpu_to_le32(0x01000600);
                tmp_rx_dma += sizeof(struct rx_desc);
                tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
                tmp_rx->next_rx_desc = tmp_rx + 1;
        }
        (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
        tmp_rx->next_rx_desc = db->first_rx_desc;

        /* pre-allocate Rx buffer */
        allocate_rx_buffer(dev);
}


/*
 *      Update CR6 value
 *      First stop the DM910X, then write the new value and restart
 */

static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
        u32 cr6_tmp;

        cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
        dw32(DCR6, cr6_tmp);
        udelay(5);
        dw32(DCR6, cr6_data);
        udelay(5);
}


/*
 *      Send a setup frame for DM9132
 *      This setup frame initializes the DM910X address filter mode
1441*/
1442
1443static void dm9132_id_table(struct net_device *dev)
1444{
1445        struct dmfe_board_info *db = netdev_priv(dev);
1446        void __iomem *ioaddr = db->ioaddr + 0xc0;
1447        u16 *addrptr = (u16 *)dev->dev_addr;
1448        struct netdev_hw_addr *ha;
1449        u16 i, hash_table[4];
1450
1451        /* Node address */
1452        for (i = 0; i < 3; i++) {
1453                dw16(0, addrptr[i]);
1454                ioaddr += 4;
1455        }
1456
1457        /* Clear Hash Table */
1458        memset(hash_table, 0, sizeof(hash_table));
1459
1460        /* broadcast address */
1461        hash_table[3] = 0x8000;
1462
1463        /* Set the multicast addresses in the 64-bit hash table */
1464        netdev_for_each_mc_addr(ha, dev) {
1465                u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1466
1467                hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1468        }
1469
1470        /* Write the hash table to MAC MD table */
1471        for (i = 0; i < 4; i++, ioaddr += 4)
1472                dw16(0, hash_table[i]);
1473}
1474
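/*
 *      Worked example (editorial): the DM9132 filters multicast with a
 *      64-bit hash spread over four 16-bit MD registers.  The low 6 bits
 *      of each address's CRC pick one of the 64 bits; e.g. a CRC whose low
 *      6 bits are 42 (0x2a) sets bit 42 % 16 = 10 of
 *      hash_table[42 / 16] = hash_table[2].  hash_table[3] bit 15 (bit 63
 *      overall) is pre-set above so broadcast frames always pass.  This
 *      assumes cal_CRC(), defined earlier in this file, returns an
 *      Ethernet-style CRC-32 of the 6-byte address.
 */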
1475
1476/*
1477 *      Send a setup frame for the DM9102/DM9102A
1478 *      This setup frame initializes the DM910X address filter mode
1479 */
1480
1481static void send_filter_frame(struct net_device *dev)
1482{
1483        struct dmfe_board_info *db = netdev_priv(dev);
1484        struct netdev_hw_addr *ha;
1485        struct tx_desc *txptr;
1486        u16 * addrptr;
1487        u32 * suptr;
1488        int i;
1489
1490        DMFE_DBUG(0, "send_filter_frame()", 0);
1491
1492        txptr = db->tx_insert_ptr;
1493        suptr = (u32 *) txptr->tx_buf_ptr;
1494
1495        /* Node address */
1496        addrptr = (u16 *) dev->dev_addr;
1497        *suptr++ = addrptr[0];
1498        *suptr++ = addrptr[1];
1499        *suptr++ = addrptr[2];
1500
1501        /* broadcast address */
1502        *suptr++ = 0xffff;
1503        *suptr++ = 0xffff;
1504        *suptr++ = 0xffff;
1505
1506        /* fill in the multicast addresses */
1507        netdev_for_each_mc_addr(ha, dev) {
1508                addrptr = (u16 *) ha->addr;
1509                *suptr++ = addrptr[0];
1510                *suptr++ = addrptr[1];
1511                *suptr++ = addrptr[2];
1512        }
1513
1514        for (i = netdev_mc_count(dev); i < 14; i++) {
1515                *suptr++ = 0xffff;
1516                *suptr++ = 0xffff;
1517                *suptr++ = 0xffff;
1518        }
1519
1520        /* prepare the setup frame */
1521        db->tx_insert_ptr = txptr->next_tx_desc;
1522        txptr->tdes1 = cpu_to_le32(0x890000c0);
1523
1524        /* Resource Check and Send the setup packet */
1525        if (!db->tx_packet_cnt) {
1526                void __iomem *ioaddr = db->ioaddr;
1527
1528                /* Resource Empty */
1529                db->tx_packet_cnt++;
1530                txptr->tdes0 = cpu_to_le32(0x80000000);
1531                update_cr6(db->cr6_data | 0x2000, ioaddr);
1532                dw32(DCR1, 0x1);        /* Issue Tx polling */
1533                update_cr6(db->cr6_data, ioaddr);
1534                netif_trans_update(dev);
1535        } else
1536                db->tx_queue_cnt++;     /* Put in TX queue */
1537}
1538
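/*
 *      Layout note (editorial): the buffer assembled above is the
 *      tulip-style 192-byte setup frame -- 16 address slots of three
 *      32-bit words each, every word carrying 16 address bits in its low
 *      half.  Slot 0 is the node address, slot 1 the broadcast address,
 *      and the remaining 14 slots hold multicast addresses, padded with
 *      additional broadcast entries when fewer are configured.
 *      tdes1 = 0x890000c0 appears to combine the interrupt-on-completion,
 *      setup-packet and chained-descriptor flags with the 0xc0 (192-byte)
 *      buffer length, matching that layout.
 */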
1539
1540/*
1541 *      Allocate Rx buffers,
1542 *      refilling as many Rx descriptors as possible (ordering note below)
1543 */
1544
1545static void allocate_rx_buffer(struct net_device *dev)
1546{
1547        struct dmfe_board_info *db = netdev_priv(dev);
1548        struct rx_desc *rxptr;
1549        struct sk_buff *skb;
1550
1551        rxptr = db->rx_insert_ptr;
1552
1553        while (db->rx_avail_cnt < RX_DESC_CNT) {
1554                if ((skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE)) == NULL)
1555                        break;
1556                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1557                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1558                                    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1559                wmb();
1560                rxptr->rdes0 = cpu_to_le32(0x80000000);
1561                rxptr = rxptr->next_rx_desc;
1562                db->rx_avail_cnt++;
1563        }
1564
1565        db->rx_insert_ptr = rxptr;
1566}
1567
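/*
 *      Ordering note (editorial): rdes0 bit 31 is the ownership bit; once
 *      it is set the DM910X may DMA into the buffer at any time, so the
 *      skb pointer and the mapped address in rdes2 must be visible first,
 *      which is what the wmb() above enforces.  (pci_map_single() is the
 *      legacy spelling of dma_map_single().)  The refill is re-run
 *      whenever descriptors become free, e.g. (sketch only):
 *
 *              if (db->rx_avail_cnt < RX_DESC_CNT)
 *                      allocate_rx_buffer(dev);
 */
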
1568static void srom_clk_write(void __iomem *ioaddr, u32 data)
1569{
1570        static const u32 cmd[] = {
1571                CR9_SROM_READ | CR9_SRCS,
1572                CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1573                CR9_SROM_READ | CR9_SRCS
1574        };
1575        int i;
1576
1577        for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1578                dw32(DCR9, data | cmd[i]);
1579                udelay(5);
1580        }
1581}
1582
1583/*
1584 *      Read one data word from the serial ROM
1585 */
1586static u16 read_srom_word(void __iomem *ioaddr, int offset)
1587{
1588        u16 srom_data;
1589        int i;
1590
1591        dw32(DCR9, CR9_SROM_READ);
1592        udelay(5);
1593        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1594        udelay(5);
1595
1596        /* Send the Read Command 110b */
1597        srom_clk_write(ioaddr, SROM_DATA_1);
1598        srom_clk_write(ioaddr, SROM_DATA_1);
1599        srom_clk_write(ioaddr, SROM_DATA_0);
1600
1601        /* Send the offset */
1602        for (i = 5; i >= 0; i--) {
1603                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1604                srom_clk_write(ioaddr, srom_data);
1605        }
1606
1607        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1608        udelay(5);
1609
1610        for (i = 16; i > 0; i--) {
1611                dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1612                udelay(5);
1613                srom_data = (srom_data << 1) |
1614                                ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1615                dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1616                udelay(5);
1617        }
1618
1619        dw32(DCR9, CR9_SROM_READ);
1620        udelay(5);
1621        return srom_data;
1622}
1623
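/*
 *      Protocol note (editorial): read_srom_word() bit-bangs a 93C46-style
 *      serial EEPROM through CR9 -- chip select, the read opcode 110b, a
 *      6-bit word offset sent MSB first, then 16 data bits clocked back in
 *      MSB first.  At probe time the driver fills db->srom word by word,
 *      roughly (sketch, assuming the probe loop earlier in this file):
 *
 *              for (i = 0; i < 64; i++)
 *                      ((__le16 *)db->srom)[i] =
 *                              cpu_to_le16(read_srom_word(ioaddr, i));
 */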
1624
1625/*
1626 *      Auto sense the media mode
1627 */
1628
1629static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1630{
1631        void __iomem *ioaddr = db->ioaddr;
1632        u8 ErrFlag = 0;
1633        u16 phy_mode;
1634
1635        /* CR6 bit18=0, select 10/100M */
1636        update_cr6(db->cr6_data & ~0x40000, ioaddr);
1637
1638        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1639        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1640
1641        if ( (phy_mode & 0x24) == 0x24 ) {
1642                if (db->chip_id == PCI_DM9132_ID)       /* DM9132 */
1643                        phy_mode = dmfe_phy_read(db->ioaddr,
1644                                                 db->phy_addr, 7, db->chip_id) & 0xf000;
1645                else                            /* DM9102/DM9102A */
1646                        phy_mode = dmfe_phy_read(db->ioaddr,
1647                                                 db->phy_addr, 17, db->chip_id) & 0xf000;
1648                switch (phy_mode) {
1649                case 0x1000: db->op_mode = DMFE_10MHF; break;
1650                case 0x2000: db->op_mode = DMFE_10MFD; break;
1651                case 0x4000: db->op_mode = DMFE_100MHF; break;
1652                case 0x8000: db->op_mode = DMFE_100MFD; break;
1653                default: db->op_mode = DMFE_10MHF;
1654                        ErrFlag = 1;
1655                        break;
1656                }
1657        } else {
1658                db->op_mode = DMFE_10MHF;
1659                DMFE_DBUG(0, "Link Failed :", phy_mode);
1660                ErrFlag = 1;
1661        }
1662
1663        return ErrFlag;
1664}
1665
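/*
 *      Editorial note: MII register 1 (status) is read twice because its
 *      link-status bit is latched low; only the second read reflects the
 *      current state.  The 0x24 mask then requires both link-up and
 *      auto-negotiation-complete before the vendor status register
 *      (register 7 on the DM9132, register 17 on the DM9102/DM9102A) is
 *      decoded: its top nibble selects DMFE_10MHF/10MFD/100MHF/100MFD as
 *      in the switch above.
 */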
1666
1667/*
1668 *      Set the 10/100 phyxcer capability
1669 *      AUTO mode : phyxcer register 4 advertises the NIC capability
1670 *      Force mode: phyxcer register 4 advertises the forced media only
1671 */
1672
1673static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1674{
1675        void __iomem *ioaddr = db->ioaddr;
1676        u16 phy_reg;
1677
1678        /* Select 10/100M phyxcer */
1679        db->cr6_data &= ~0x40000;
1680        update_cr6(db->cr6_data, ioaddr);
1681
1682        /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1683        if (db->chip_id == PCI_DM9009_ID) {
1684                phy_reg = dmfe_phy_read(db->ioaddr,
1685                                        db->phy_addr, 18, db->chip_id) & ~0x1000;
1686
1687                dmfe_phy_write(db->ioaddr,
1688                               db->phy_addr, 18, phy_reg, db->chip_id);
1689        }
1690
1691        /* Phyxcer capability setting */
1692        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1693
1694        if (db->media_mode & DMFE_AUTO) {
1695                /* AUTO Mode */
1696                phy_reg |= db->PHY_reg4;
1697        } else {
1698                /* Force Mode */
1699                switch(db->media_mode) {
1700                case DMFE_10MHF: phy_reg |= 0x20; break;
1701                case DMFE_10MFD: phy_reg |= 0x40; break;
1702                case DMFE_100MHF: phy_reg |= 0x80; break;
1703                case DMFE_100MFD: phy_reg |= 0x100; break;
1704                }
1705                if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1706        }
1707
1708        /* Write new capability to Phyxcer Reg4 */
1709        if ( !(phy_reg & 0x01e0)) {
1710                phy_reg|=db->PHY_reg4;
1711                db->media_mode|=DMFE_AUTO;
1712        }
1713        dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1714
1715        /* Restart Auto-Negotiation */
1716        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1717                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1718        if ( !db->chip_type )
1719                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1720}
1721
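/*
 *      Editorial note: PHY register 4 is the MII auto-negotiation
 *      advertisement register, and the 0x01e0 field rebuilt above holds
 *      the standard ability bits -- 0x0020 10BASE-T, 0x0040 10BASE-T full
 *      duplex, 0x0080 100BASE-TX, 0x0100 100BASE-TX full duplex -- which
 *      is also how dmfe_parse_srom() derives PHY_reg4 from the SROM
 *      capability word.
 */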
1722
1723/*
1724 *      Process op-mode
1725 *      AUTO mode : PHY controller in auto-negotiation mode
1726 *      Force mode: PHY controller in force mode with a hub,
1727 *                      or N-way forced capability with a switch
1728 */
1729
1730static void dmfe_process_mode(struct dmfe_board_info *db)
1731{
1732        u16 phy_reg;
1733
1734        /* Full Duplex Mode Check */
1735        if (db->op_mode & 0x4)
1736                db->cr6_data |= CR6_FDM;        /* Set Full Duplex Bit */
1737        else
1738                db->cr6_data &= ~CR6_FDM;       /* Clear Full Duplex Bit */
1739
1740        /* Transceiver Selection */
1741        if (db->op_mode & 0x10)         /* 1M HomePNA */
1742                db->cr6_data |= 0x40000;        /* External MII select */
1743        else
1744                db->cr6_data &= ~0x40000;       /* Internal 10/100 transceiver */
1745
1746        update_cr6(db->cr6_data, db->ioaddr);
1747
1748        /* 10/100M phyxcer force mode is needed */
1749        if ( !(db->media_mode & 0x18)) {
1750                /* Force Mode */
1751                phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1752                if ( !(phy_reg & 0x1) ) {
1753                        /* partner without N-Way capability */
1754                        phy_reg = 0x0;
1755                        switch(db->op_mode) {
1756                        case DMFE_10MHF: phy_reg = 0x0; break;
1757                        case DMFE_10MFD: phy_reg = 0x100; break;
1758                        case DMFE_100MHF: phy_reg = 0x2000; break;
1759                        case DMFE_100MFD: phy_reg = 0x2100; break;
1760                        }
1761                        dmfe_phy_write(db->ioaddr,
1762                                       db->phy_addr, 0, phy_reg, db->chip_id);
1763                        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1764                                mdelay(20);
1765                        dmfe_phy_write(db->ioaddr,
1766                                       db->phy_addr, 0, phy_reg, db->chip_id);
1767                }
1768        }
1769}
1770
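/*
 *      Editorial note: the values written to PHY register 0 (BMCR) above
 *      follow the standard MII layout -- bit 13 (0x2000) selects 100 Mb/s
 *      and bit 8 (0x0100) full duplex -- so 0x0000/0x0100/0x2000/0x2100
 *      force 10HD/10FD/100HD/100FD when the link partner (register 6,
 *      bit 0) reports no N-Way capability.
 */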
1771
1772/*
1773 *      Write a word to Phy register
1774 */
1775
1776static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1777                           u16 phy_data, u32 chip_id)
1778{
1779        u16 i;
1780
1781        if (chip_id == PCI_DM9132_ID) {
1782                dw16(0x80 + offset * 4, phy_data);
1783        } else {
1784                /* DM9102/DM9102A Chip */
1785
1786                /* Send 35 synchronization clock pulses to the PHY controller */
1787                for (i = 0; i < 35; i++)
1788                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1789
1790                /* Send start command(01) to Phy */
1791                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1792                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1793
1794                /* Send write command(01) to Phy */
1795                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1796                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1797
1798                /* Send Phy address */
1799                for (i = 0x10; i > 0; i = i >> 1)
1800                        dmfe_phy_write_1bit(ioaddr,
1801                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1802
1803                /* Send register address */
1804                for (i = 0x10; i > 0; i = i >> 1)
1805                        dmfe_phy_write_1bit(ioaddr,
1806                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1807
1808                /* write turnaround (10) */
1809                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1810                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1811
1812                /* Write a word data to PHY controller */
1813                for ( i = 0x8000; i > 0; i >>= 1)
1814                        dmfe_phy_write_1bit(ioaddr,
1815                                            phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1816        }
1817}
1818
1819
1820/*
1821 *      Read a data word from a PHY register
1822 */
1823
1824static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1825{
1826        int i;
1827        u16 phy_data;
1828
1829        if (chip_id == PCI_DM9132_ID) {
1830                /* DM9132 Chip */
1831                phy_data = dr16(0x80 + offset * 4);
1832        } else {
1833                /* DM9102/DM9102A Chip */
1834
1835                /* Send 35 synchronization clock pulses to the PHY controller */
1836                for (i = 0; i < 35; i++)
1837                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1838
1839                /* Send start command(01) to Phy */
1840                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1841                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1842
1843                /* Send read command(10) to Phy */
1844                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1845                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1846
1847                /* Send Phy address */
1848                for (i = 0x10; i > 0; i = i >> 1)
1849                        dmfe_phy_write_1bit(ioaddr,
1850                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1851
1852                /* Send register address */
1853                for (i = 0x10; i > 0; i = i >> 1)
1854                        dmfe_phy_write_1bit(ioaddr,
1855                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1856
1857                /* Skip transition state */
1858                dmfe_phy_read_1bit(ioaddr);
1859
1860                /* read 16bit data */
1861                for (phy_data = 0, i = 0; i < 16; i++) {
1862                        phy_data <<= 1;
1863                        phy_data |= dmfe_phy_read_1bit(ioaddr);
1864                }
1865        }
1866
1867        return phy_data;
1868}
1869
1870
1871/*
1872 *      Write one bit data to Phy Controller
1873 */
1874
1875static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1876{
1877        dw32(DCR9, phy_data);           /* MII Clock Low */
1878        udelay(1);
1879        dw32(DCR9, phy_data | MDCLKH);  /* MII Clock High */
1880        udelay(1);
1881        dw32(DCR9, phy_data);           /* MII Clock Low */
1882        udelay(1);
1883}
1884
1885
1886/*
1887 *      Read one bit phy data from PHY controller
1888 */
1889
1890static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1891{
1892        u16 phy_data;
1893
1894        dw32(DCR9, 0x50000);
1895        udelay(1);
1896        phy_data = (dr32(DCR9) >> 19) & 0x1;
1897        dw32(DCR9, 0x40000);
1898        udelay(1);
1899
1900        return phy_data;
1901}
1902
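/*
 *      Editorial note: dmfe_phy_write()/dmfe_phy_read() above bit-bang a
 *      standard MII management (MDIO) frame on CR9: a preamble of 1s, the
 *      start pattern 01, the opcode (01 write, 10 read), the 5-bit PHY
 *      address, the 5-bit register address, a turnaround, then 16 data
 *      bits, MSB first throughout.  dmfe_phy_write_1bit() drives one
 *      low-high-low clock cycle with the data line held at the given
 *      level, and dmfe_phy_read_1bit() samples the data-out bit (CR9
 *      bit 19) across one clock pulse.
 */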
1903
1904/*
1905 *      Parse the SROM and set the media mode
1906 */
1907
1908static void dmfe_parse_srom(struct dmfe_board_info * db)
1909{
1910        char * srom = db->srom;
1911        int dmfe_mode, tmp_reg;
1912
1913        DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1914
1915        /* Init CR15 */
1916        db->cr15_data = CR15_DEFAULT;
1917
1918        /* Check SROM Version */
1919        if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1920                /* SROM V4.01 */
1921                /* Get NIC support media mode */
1922                db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1923                db->PHY_reg4 = 0;
1924                for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1925                        switch( db->NIC_capability & tmp_reg ) {
1926                        case 0x1: db->PHY_reg4 |= 0x0020; break;
1927                        case 0x2: db->PHY_reg4 |= 0x0040; break;
1928                        case 0x4: db->PHY_reg4 |= 0x0080; break;
1929                        case 0x8: db->PHY_reg4 |= 0x0100; break;
1930                        }
1931                }
1932
1933                /* Media Mode Force or not check */
1934                dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1935                             le32_to_cpup((__le32 *) (srom + 36)));
1936                switch(dmfe_mode) {
1937                case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1938                case 0x2: dmfe_media_mode = DMFE_10MFD; break;  /* 10MFD */
1939                case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1940                case 0x100:
1941                case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1942                }
1943
1944                /* Special Function setting */
1945                /* VLAN function */
1946                if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1947                        db->cr15_data |= 0x40;
1948
1949                /* Flow Control */
1950                if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1951                        db->cr15_data |= 0x400;
1952
1953                /* TX pause packet */
1954                if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1955                        db->cr15_data |= 0x9800;
1956        }
1957
1958        /* Parse HPNA parameter */
1959        db->HPNA_command = 1;
1960
1961        /* Accept remote command or not */
1962        if (HPNA_rx_cmd == 0)
1963                db->HPNA_command |= 0x8000;
1964
1965        /* Issue remote command & operation mode */
1966        if (HPNA_tx_cmd == 1)
1967                switch(HPNA_mode) {     /* Issue Remote Command */
1968                case 0: db->HPNA_command |= 0x0904; break;
1969                case 1: db->HPNA_command |= 0x0a00; break;
1970                case 2: db->HPNA_command |= 0x0506; break;
1971                case 3: db->HPNA_command |= 0x0602; break;
1972                }
1973        else
1974                switch(HPNA_mode) {     /* Don't Issue */
1975                case 0: db->HPNA_command |= 0x0004; break;
1976                case 1: db->HPNA_command |= 0x0000; break;
1977                case 2: db->HPNA_command |= 0x0006; break;
1978                case 3: db->HPNA_command |= 0x0002; break;
1979                }
1980
1981        /* Check whether a DM9801 or DM9802 is present */
1982        db->HPNA_present = 0;
1983        update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1984        tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1985        if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1986                /* DM9801 or DM9802 present */
1987                db->HPNA_timer = 8;
1988                if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1989                        /* DM9801 HomeRun */
1990                        db->HPNA_present = 1;
1991                        dmfe_program_DM9801(db, tmp_reg);
1992                } else {
1993                        /* DM9802 LongRun */
1994                        db->HPNA_present = 2;
1995                        dmfe_program_DM9802(db);
1996                }
1997        }
1998
1999}
2000
2001
2002/*
2003 *      Init HomeRun DM9801
2004 */
2005
2006static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2007{
2008        uint reg17, reg25;
2009
2010        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2011        switch(HPNA_rev) {
2012        case 0xb900: /* DM9801 E3 */
2013                db->HPNA_command |= 0x1000;
2014                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2015                reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2016                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2017                break;
2018        case 0xb901: /* DM9801 E4 */
2019                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2020                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2021                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2022                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2023                break;
2024        case 0xb902: /* DM9801 E5 */
2025        case 0xb903: /* DM9801 E6 */
2026        default:
2027                db->HPNA_command |= 0x1000;
2028                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2029                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2030                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2031                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2032                break;
2033        }
2034        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2035        dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2036        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2037}
2038
2039
2040/*
2041 *      Init HomeRun DM9802
2042 */
2043
2044static void dmfe_program_DM9802(struct dmfe_board_info * db)
2045{
2046        uint phy_reg;
2047
2048        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2049        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2050        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2051        phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2052        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2053}
2054
2055
2056/*
2057 *      Check the remote HPNA power and speed status. If they do not
2058 *      match our setting, issue the command again.
2059 */
2060
2061static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2062{
2063        uint phy_reg;
2064
2065        /* Get the remote device status */
2066        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2067        switch(phy_reg) {
2068        case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2069        case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2070        case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2071        case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2072        }
2073
2074        /* Check whether the remote device status matches our setting */
2075        if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2076                dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2077                               db->chip_id);
2078                db->HPNA_timer=8;
2079        } else
2080                db->HPNA_timer=600;     /* Match, every 10 minutes, check */
2081}
2082
2083
2084
2085static const struct pci_device_id dmfe_pci_tbl[] = {
2086        { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2087        { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2088        { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2089        { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2090        { 0, }
2091};
2092MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2093
2094
2095#ifdef CONFIG_PM
2096static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2097{
2098        struct net_device *dev = pci_get_drvdata(pci_dev);
2099        struct dmfe_board_info *db = netdev_priv(dev);
2100        void __iomem *ioaddr = db->ioaddr;
2101        u32 tmp;
2102
2103        /* Disable upper layer interface */
2104        netif_device_detach(dev);
2105
2106        /* Disable Tx/Rx */
2107        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2108        update_cr6(db->cr6_data, ioaddr);
2109
2110        /* Disable Interrupt */
2111        dw32(DCR7, 0);
2112        dw32(DCR5, dr32(DCR5));
2113
2114        /* Free Rx buffers */
2115        dmfe_free_rxbuffer(db);
2116
2117        /* Enable WOL */
2118        pci_read_config_dword(pci_dev, 0x40, &tmp);
2119        tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2120
2121        if (db->wol_mode & WAKE_PHY)
2122                tmp |= DMFE_WOL_LINKCHANGE;
2123        if (db->wol_mode & WAKE_MAGIC)
2124                tmp |= DMFE_WOL_MAGICPACKET;
2125
2126        pci_write_config_dword(pci_dev, 0x40, tmp);
2127
2128        pci_enable_wake(pci_dev, PCI_D3hot, 1);
2129        pci_enable_wake(pci_dev, PCI_D3cold, 1);
2130
2131        /* Power down the device */
2132        pci_save_state(pci_dev);
2133        pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));
2134
2135        return 0;
2136}
2137
2138static int dmfe_resume(struct pci_dev *pci_dev)
2139{
2140        struct net_device *dev = pci_get_drvdata(pci_dev);
2141        u32 tmp;
2142
2143        pci_set_power_state(pci_dev, PCI_D0);
2144        pci_restore_state(pci_dev);
2145
2146        /* Re-initialize DM910X board */
2147        dmfe_init_dm910x(dev);
2148
2149        /* Disable WOL */
2150        pci_read_config_dword(pci_dev, 0x40, &tmp);
2151
2152        tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2153        pci_write_config_dword(pci_dev, 0x40, tmp);
2154
2155        pci_enable_wake(pci_dev, PCI_D3hot, 0);
2156        pci_enable_wake(pci_dev, PCI_D3cold, 0);
2157
2158        /* Restart upper layer interface */
2159        netif_device_attach(dev);
2160
2161        return 0;
2162}
2163#else
2164#define dmfe_suspend NULL
2165#define dmfe_resume NULL
2166#endif
2167
2168static struct pci_driver dmfe_driver = {
2169        .name           = "dmfe",
2170        .id_table       = dmfe_pci_tbl,
2171        .probe          = dmfe_init_one,
2172        .remove         = dmfe_remove_one,
2173        .suspend        = dmfe_suspend,
2174        .resume         = dmfe_resume
2175};
2176
2177MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2178MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2179MODULE_LICENSE("GPL");
2180MODULE_VERSION(DRV_VERSION);
2181
2182module_param(debug, int, 0);
2183module_param(mode, byte, 0);
2184module_param(cr6set, int, 0);
2185module_param(chkmode, byte, 0);
2186module_param(HPNA_mode, byte, 0);
2187module_param(HPNA_rx_cmd, byte, 0);
2188module_param(HPNA_tx_cmd, byte, 0);
2189module_param(HPNA_NoiseFloor, byte, 0);
2190module_param(SF_mode, byte, 0);
2191MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2192MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2193                "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2194
2195MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2196                "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2197
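/*
 *      Usage note (editorial): the media is normally auto-negotiated, so
 *      "mode" only needs to be set to force it; values other than the
 *      DMFE_* codes accepted by dmfe_init_module() below fall back to
 *      DMFE_AUTO.  "cr6set" overrides the driver's computed CR6 value and
 *      should only be used with the datasheet at hand.  A minimal
 *      debugging invocation might look like:
 *
 *              modprobe dmfe debug=1
 */
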
2198/*      Description:
2199 *      When the user loads the module with insmod, the kernel invokes
2200 *      this init routine to initialize and register the driver.
2201 */
2202
2203static int __init dmfe_init_module(void)
2204{
2205        int rc;
2206
2207        pr_info("%s\n", version);
2208        printed_version = 1;
2209
2210        DMFE_DBUG(0, "init_module() ", debug);
2211
2212        if (debug)
2213                dmfe_debug = debug;     /* set debug flag */
2214        if (cr6set)
2215                dmfe_cr6_user_set = cr6set;
2216
2217        switch(mode) {
2218        case DMFE_10MHF:
2219        case DMFE_100MHF:
2220        case DMFE_10MFD:
2221        case DMFE_100MFD:
2222        case DMFE_1M_HPNA:
2223                dmfe_media_mode = mode;
2224                break;
2225        default:dmfe_media_mode = DMFE_AUTO;
2226                break;
2227        }
2228
2229        if (HPNA_mode > 4)
2230                HPNA_mode = 0;          /* Default: LP/HS */
2231        if (HPNA_rx_cmd > 1)
2232                HPNA_rx_cmd = 0;        /* Default: Ignored remote cmd */
2233        if (HPNA_tx_cmd > 1)
2234                HPNA_tx_cmd = 0;        /* Default: Don't issue remote cmd */
2235        if (HPNA_NoiseFloor > 15)
2236                HPNA_NoiseFloor = 0;
2237
2238        rc = pci_register_driver(&dmfe_driver);
2239        if (rc < 0)
2240                return rc;
2241
2242        return 0;
2243}
2244
2245
2246/*
2247 *      Description:
2248 *      When the user removes the module with rmmod, the kernel invokes
2249 *      this exit routine to unregister all registered services.
2250 */
2251
2252static void __exit dmfe_cleanup_module(void)
2253{
2254        DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2255        pci_unregister_driver(&dmfe_driver);
2256}
2257
2258module_init(dmfe_init_module);
2259module_exit(dmfe_cleanup_module);
2260