linux/drivers/net/ethernet/dec/tulip/dmfe.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
   4    ethernet driver for Linux.
   5    Copyright (C) 1997  Sten Wang
   6
   7
   8    DAVICOM Web-Site: www.davicom.com.tw
   9
  10    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
  11    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
  12
  13    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
  14
  15    Marcelo Tosatti <marcelo@conectiva.com.br> :
  16    Made it compile in 2.3 (device to net_device)
  17
  18    Alan Cox <alan@lxorguk.ukuu.org.uk> :
  19    Cleaned up for kernel merge.
  20    Removed the back compatibility support
  21    Reformatted, fixing spelling etc as I went
  22    Removed IRQ 0-15 assumption
  23
  24    Jeff Garzik <jgarzik@pobox.com> :
  25    Updated to use new PCI driver API.
  26    Resource usage cleanups.
  27    Report driver version to user.
  28
  29    Tobias Ringstrom <tori@unhappy.mine.nu> :
  30    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
  31    Andrew Morton and Frank Davis for the SMP safety fixes.
  32
  33    Vojtech Pavlik <vojtech@suse.cz> :
   34    Cleaned up pointer arithmetic.
  35    Fixed a lot of 64bit issues.
  36    Cleaned up printk()s a bit.
  37    Fixed some obvious big endian problems.
  38
  39    Tobias Ringstrom <tori@unhappy.mine.nu> :
  40    Use time_after for jiffies calculation.  Added ethtool
  41    support.  Updated PCI resource allocation.  Do not
  42    forget to unmap PCI mapped skbs.
  43
  44    Alan Cox <alan@lxorguk.ukuu.org.uk>
  45    Added new PCI identifiers provided by Clear Zhang at ALi
  46    for their 1563 ethernet device.
  47
  48    TODO
  49
  50    Check on 64 bit boxes.
  51    Check and fix on big endian boxes.
  52
  53    Test and make sure PCI latency is now correct for all cases.
  54*/
  55
  56#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  57
  58#define DRV_NAME        "dmfe"
  59
  60#include <linux/module.h>
  61#include <linux/kernel.h>
  62#include <linux/string.h>
  63#include <linux/timer.h>
  64#include <linux/ptrace.h>
  65#include <linux/errno.h>
  66#include <linux/ioport.h>
  67#include <linux/interrupt.h>
  68#include <linux/pci.h>
  69#include <linux/dma-mapping.h>
  70#include <linux/init.h>
  71#include <linux/netdevice.h>
  72#include <linux/etherdevice.h>
  73#include <linux/ethtool.h>
  74#include <linux/skbuff.h>
  75#include <linux/delay.h>
  76#include <linux/spinlock.h>
  77#include <linux/crc32.h>
  78#include <linux/bitops.h>
  79
  80#include <asm/processor.h>
  81#include <asm/io.h>
  82#include <asm/dma.h>
  83#include <linux/uaccess.h>
  84#include <asm/irq.h>
  85
  86#ifdef CONFIG_TULIP_DM910X
  87#include <linux/of.h>
  88#endif
  89
  90
  91/* Board/System/Debug information/definition ---------------- */
  92#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
  93#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
  94#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
  95#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
  96
  97#define DM9102_IO_SIZE  0x80
  98#define DM9102A_IO_SIZE 0x100
  99#define TX_MAX_SEND_CNT 0x1             /* Maximum Tx packets in flight at a time */
 100#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
 101#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
 102#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)      /* Max TX packet count */
 103#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)      /* TX wakeup count */
 104#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
 105#define TX_BUF_ALLOC    0x600
 106#define RX_ALLOC_SIZE   0x620
 107#define DM910X_RESET    1
 108#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
 109#define CR6_DEFAULT     0x00080000      /* HD */
 110#define CR7_DEFAULT     0x180c1
 111#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
 112#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
 113#define MAX_PACKET_SIZE 1514
 114#define DMFE_MAX_MULTICAST 14
 115#define RX_COPY_SIZE    100
 116#define MAX_CHECK_PACKET 0x8000
 117#define DM9801_NOISE_FLOOR 8
 118#define DM9802_NOISE_FLOOR 5
 119
 120#define DMFE_WOL_LINKCHANGE     0x20000000
 121#define DMFE_WOL_SAMPLEPACKET   0x10000000
 122#define DMFE_WOL_MAGICPACKET    0x08000000
 123
 124
 125#define DMFE_10MHF      0
 126#define DMFE_100MHF     1
 127#define DMFE_10MFD      4
 128#define DMFE_100MFD     5
 129#define DMFE_AUTO       8
 130#define DMFE_1M_HPNA    0x10
 131
 132#define DMFE_TXTH_72    0x400000        /* TX TH 72 byte */
 133#define DMFE_TXTH_96    0x404000        /* TX TH 96 byte */
 134#define DMFE_TXTH_128   0x0000          /* TX TH 128 byte */
 135#define DMFE_TXTH_256   0x4000          /* TX TH 256 byte */
 136#define DMFE_TXTH_512   0x8000          /* TX TH 512 byte */
 137#define DMFE_TXTH_1K    0xC000          /* TX TH 1K  byte */
 138
 139#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
 140#define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time: 1.5 s */
 141#define DMFE_TX_KICK    (HZ/2)  /* tx packet kick-out time: 0.5 s */
 142
 143#define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
 144#define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
 145#define dr32(reg)       ioread32(ioaddr + (reg))
 146#define dr16(reg)       ioread16(ioaddr + (reg))
 147#define dr8(reg)        ioread8(ioaddr + (reg))
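/*
 * Note on the accessors above: dw32()/dw16()/dr32()/dr16()/dr8() are thin
 * iowrite/ioread wrappers that expect a local variable named "ioaddr"
 * (the mapped BAR 0 base) to be in scope at the call site, which is why
 * most routines below begin with "void __iomem *ioaddr = db->ioaddr;".
 * For example, the interrupt handler reads and acknowledges the status
 * register with "db->cr5_data = dr32(DCR5); dw32(DCR5, db->cr5_data);"
 * and masks all interrupt sources with "dw32(DCR7, 0);".
 */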
 148
 149#define DMFE_DBUG(dbug_now, msg, value)                 \
 150        do {                                            \
 151                if (dmfe_debug || (dbug_now))           \
 152                        pr_err("%s %lx\n",              \
 153                               (msg), (long) (value));  \
 154        } while (0)
 155
 156#define SHOW_MEDIA_TYPE(mode)                           \
 157        pr_info("Change Speed to %sMbps %s duplex\n",   \
 158                (mode & 1) ? "100":"10",                \
 159                (mode & 4) ? "full":"half");
 160
 161
 162/* CR9 definition: SROM/MII */
 163#define CR9_SROM_READ   0x4800
 164#define CR9_SRCS        0x1
 165#define CR9_SRCLK       0x2
 166#define CR9_CRDOUT      0x8
 167#define SROM_DATA_0     0x0
 168#define SROM_DATA_1     0x4
 169#define PHY_DATA_1      0x20000
 170#define PHY_DATA_0      0x00000
 171#define MDCLKH          0x10000
 172
 173#define PHY_POWER_DOWN  0x800
 174
 175#define SROM_V41_CODE   0x14
 176
 177#define __CHK_IO_SIZE(pci_id, dev_rev) \
 178 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
 179        DM9102A_IO_SIZE: DM9102_IO_SIZE)
 180
 181#define CHK_IO_SIZE(pci_dev) \
 182        (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
 183        (pci_dev)->revision))
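/*
 * CHK_IO_SIZE() returns the I/O window the detected chip is expected to
 * decode: the DM9132, and DM9102 parts with revision >= 0x30 (treated by
 * this driver as DM9102A and later), use the 256-byte DM9102A_IO_SIZE,
 * while older DM9100/DM9102 parts only decode the 128-byte
 * DM9102_IO_SIZE.  dmfe_init_one() compares this against
 * pci_resource_len() as a sanity check before requesting the PCI regions.
 */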
 184
 185/* Structure/enum declaration ------------------------------- */
 186struct tx_desc {
 187        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
 188        char *tx_buf_ptr;               /* Data for us */
 189        struct tx_desc *next_tx_desc;
 190} __attribute__(( aligned(32) ));
 191
 192struct rx_desc {
 193        __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
 194        struct sk_buff *rx_skb_ptr;     /* Data for us */
 195        struct rx_desc *next_rx_desc;
 196} __attribute__(( aligned(32) ));
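/*
 * Layout note: in both descriptor structures only the first four __le32
 * words (tdes0..tdes3 / rdes0..rdes3) form the hardware descriptor that
 * the chip accesses over DMA; the trailing pointers (tx_buf_ptr,
 * next_tx_desc, rx_skb_ptr, next_rx_desc) are host-side bookkeeping the
 * hardware never sees.  The aligned(32) attribute keeps each descriptor
 * on a 32-byte boundary, presumably so the hardware-visible words stay
 * within a single aligned burst separate from the driver-private fields.
 */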
 197
 198struct dmfe_board_info {
 199        u32 chip_id;                    /* Chip vendor/Device ID */
 200        u8 chip_revision;               /* Chip revision */
 201        struct net_device *next_dev;    /* next device */
 202        struct pci_dev *pdev;           /* PCI device */
 203        spinlock_t lock;
 204
 205        void __iomem *ioaddr;           /* I/O base address */
 206        u32 cr0_data;
 207        u32 cr5_data;
 208        u32 cr6_data;
 209        u32 cr7_data;
 210        u32 cr15_data;
 211
 212        /* pointer for memory physical address */
 213        dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
 214        dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
 215        dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
 216        dma_addr_t first_tx_desc_dma;
 217        dma_addr_t first_rx_desc_dma;
 218
 219        /* descriptor pointer */
 220        unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
 221        unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
 222        unsigned char *desc_pool_ptr;   /* descriptor pool memory */
 223        struct tx_desc *first_tx_desc;
 224        struct tx_desc *tx_insert_ptr;
 225        struct tx_desc *tx_remove_ptr;
 226        struct rx_desc *first_rx_desc;
 227        struct rx_desc *rx_insert_ptr;
 228        struct rx_desc *rx_ready_ptr;   /* next Rx descriptor to process */
 229        unsigned long tx_packet_cnt;    /* transmitted packet count */
 230        unsigned long tx_queue_cnt;     /* wait to send packet count */
 231        unsigned long rx_avail_cnt;     /* available rx descriptor count */
 232        unsigned long interval_rx_cnt;  /* Rx packets received per timer interval */
 233
 234        u16 HPNA_command;               /* For HPNA register 16 */
 235        u16 HPNA_timer;                 /* For HPNA remote device check */
 236        u16 dbug_cnt;
 237        u16 NIC_capability;             /* NIC media capability */
 238        u16 PHY_reg4;                   /* Saved Phyxcer register 4 value */
 239
 240        u8 HPNA_present;                /* 0:none, 1:DM9801, 2:DM9802 */
 241        u8 chip_type;                   /* Keep DM9102A chip type */
 242        u8 media_mode;                  /* user specify media mode */
 243        u8 op_mode;                     /* actual working media mode */
 244        u8 phy_addr;
 245        u8 wait_reset;                  /* Hardware failed, need to reset */
 246        u8 dm910x_chk_mode;             /* Operating mode check */
 247        u8 first_in_callback;           /* Flag to record state */
 248        u8 wol_mode;                    /* user WOL settings */
 249        struct timer_list timer;
 250
 251        /* Driver defined statistic counter */
 252        unsigned long tx_fifo_underrun;
 253        unsigned long tx_loss_carrier;
 254        unsigned long tx_no_carrier;
 255        unsigned long tx_late_collision;
 256        unsigned long tx_excessive_collision;
 257        unsigned long tx_jabber_timeout;
 258        unsigned long reset_count;
 259        unsigned long reset_cr8;
 260        unsigned long reset_fatal;
 261        unsigned long reset_TXtimeout;
 262
 263        /* NIC SROM data */
 264        unsigned char srom[128];
 265};
 266
 267enum dmfe_offsets {
 268        DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
 269        DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
 270        DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
 271        DCR15 = 0x78
 272};
 273
 274enum dmfe_CR6_bits {
 275        CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
 276        CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
 277        CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
 278};
 279
 280/* Global variable declaration ----------------------------- */
 281static int dmfe_debug;
 282static unsigned char dmfe_media_mode = DMFE_AUTO;
 283static u32 dmfe_cr6_user_set;
 284
 285/* For module input parameter */
 286static int debug;
 287static u32 cr6set;
 288static unsigned char mode = 8;
 289static u8 chkmode = 1;
 290static u8 HPNA_mode;            /* Default: Low Power/High Speed */
 291static u8 HPNA_rx_cmd;          /* Default: Disable Rx remote command */
 292static u8 HPNA_tx_cmd;          /* Default: Don't issue remote command */
 293static u8 HPNA_NoiseFloor;      /* Default: HPNA NoiseFloor */
 294static u8 SF_mode;              /* Special Function: 1:VLAN, 2:RX Flow Control
 295                                   4: TX pause packet */
 296
 297
 298/* function declaration ------------------------------------- */
 299static int dmfe_open(struct net_device *);
 300static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
 301static int dmfe_stop(struct net_device *);
 302static void dmfe_set_filter_mode(struct net_device *);
 303static const struct ethtool_ops netdev_ethtool_ops;
 304static u16 read_srom_word(void __iomem *, int);
 305static irqreturn_t dmfe_interrupt(int , void *);
 306#ifdef CONFIG_NET_POLL_CONTROLLER
 307static void poll_dmfe (struct net_device *dev);
 308#endif
 309static void dmfe_descriptor_init(struct net_device *);
 310static void allocate_rx_buffer(struct net_device *);
 311static void update_cr6(u32, void __iomem *);
 312static void send_filter_frame(struct net_device *);
 313static void dm9132_id_table(struct net_device *);
 314static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
 315static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
 316static void dmfe_phy_write_1bit(void __iomem *, u32);
 317static u16 dmfe_phy_read_1bit(void __iomem *);
 318static u8 dmfe_sense_speed(struct dmfe_board_info *);
 319static void dmfe_process_mode(struct dmfe_board_info *);
 320static void dmfe_timer(struct timer_list *);
 321static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
 322static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
 323static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
 324static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
 325static void dmfe_dynamic_reset(struct net_device *);
 326static void dmfe_free_rxbuffer(struct dmfe_board_info *);
 327static void dmfe_init_dm910x(struct net_device *);
 328static void dmfe_parse_srom(struct dmfe_board_info *);
 329static void dmfe_program_DM9801(struct dmfe_board_info *, int);
 330static void dmfe_program_DM9802(struct dmfe_board_info *);
 331static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
 332static void dmfe_set_phyxcer(struct dmfe_board_info *);
 333
 334/* DM910X network board routine ---------------------------- */
 335
 336static const struct net_device_ops netdev_ops = {
 337        .ndo_open               = dmfe_open,
 338        .ndo_stop               = dmfe_stop,
 339        .ndo_start_xmit         = dmfe_start_xmit,
 340        .ndo_set_rx_mode        = dmfe_set_filter_mode,
 341        .ndo_set_mac_address    = eth_mac_addr,
 342        .ndo_validate_addr      = eth_validate_addr,
 343#ifdef CONFIG_NET_POLL_CONTROLLER
 344        .ndo_poll_controller    = poll_dmfe,
 345#endif
 346};
 347
 348/*
 349 *      Search for a DM910X board, allocate space and register it
 350 */
 351
 352static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 353{
 354        struct dmfe_board_info *db;     /* board information structure */
 355        struct net_device *dev;
 356        u32 pci_pmr;
 357        int i, err;
 358
 359        DMFE_DBUG(0, "dmfe_init_one()", 0);
 360
 361        /*
 362         *      SPARC on-board DM910x chips should be handled by the main
 363         *      tulip driver, except for early DM9100s.
 364         */
 365#ifdef CONFIG_TULIP_DM910X
 366        if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
 367            ent->driver_data == PCI_DM9102_ID) {
 368                struct device_node *dp = pci_device_to_OF_node(pdev);
 369
 370                if (dp && of_get_property(dp, "local-mac-address", NULL)) {
 371                        pr_info("skipping on-board DM910x (use tulip)\n");
 372                        return -ENODEV;
 373                }
 374        }
 375#endif
 376
 377        /* Init network device */
 378        dev = alloc_etherdev(sizeof(*db));
 379        if (dev == NULL)
 380                return -ENOMEM;
 381        SET_NETDEV_DEV(dev, &pdev->dev);
 382
 383        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
 384                pr_warn("32-bit PCI DMA not available\n");
 385                err = -ENODEV;
 386                goto err_out_free;
 387        }
 388
 389        /* Enable Master/IO access, Disable memory access */
 390        err = pci_enable_device(pdev);
 391        if (err)
 392                goto err_out_free;
 393
 394        if (!pci_resource_start(pdev, 0)) {
 395                pr_err("I/O base is zero\n");
 396                err = -ENODEV;
 397                goto err_out_disable;
 398        }
 399
 400        if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
 401                pr_err("Allocated I/O size too small\n");
 402                err = -ENODEV;
 403                goto err_out_disable;
 404        }
 405
 406#if 0   /* pci_{enable_device,set_master} sets minimum latency for us now */
 407
 408        /* Set Latency Timer 80h */
 409        /* FIXME: setting values > 32 breaks some SiS 559x stuff.
 410           Need a PCI quirk.. */
 411
 412        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
 413#endif
 414
 415        if (pci_request_regions(pdev, DRV_NAME)) {
 416                pr_err("Failed to request PCI regions\n");
 417                err = -ENODEV;
 418                goto err_out_disable;
 419        }
 420
 421        /* Init system & device */
 422        db = netdev_priv(dev);
 423
 424        /* Allocate Tx/Rx descriptor memory */
 425        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
 426                        DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
 427        if (!db->desc_pool_ptr) {
 428                err = -ENOMEM;
 429                goto err_out_res;
 430        }
 431
 432        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
 433                        TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
 434        if (!db->buf_pool_ptr) {
 435                err = -ENOMEM;
 436                goto err_out_free_desc;
 437        }
 438
 439        db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 440        db->first_tx_desc_dma = db->desc_pool_dma_ptr;
 441        db->buf_pool_start = db->buf_pool_ptr;
 442        db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 443
 444        db->chip_id = ent->driver_data;
 445        /* IO type range. */
 446        db->ioaddr = pci_iomap(pdev, 0, 0);
 447        if (!db->ioaddr) {
 448                err = -ENOMEM;
 449                goto err_out_free_buf;
 450        }
 451
 452        db->chip_revision = pdev->revision;
 453        db->wol_mode = 0;
 454
 455        db->pdev = pdev;
 456
 457        pci_set_drvdata(pdev, dev);
 458        dev->netdev_ops = &netdev_ops;
 459        dev->ethtool_ops = &netdev_ethtool_ops;
 460        netif_carrier_off(dev);
 461        spin_lock_init(&db->lock);
 462
 463        pci_read_config_dword(pdev, 0x50, &pci_pmr);
 464        pci_pmr &= 0x70000;
 465        if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
 466                db->chip_type = 1;      /* DM9102A E3 */
 467        else
 468                db->chip_type = 0;
 469
 470        /* Read 64 words of SROM data */
 471        for (i = 0; i < 64; i++) {
 472                ((__le16 *) db->srom)[i] =
 473                        cpu_to_le16(read_srom_word(db->ioaddr, i));
 474        }
 475
 476        /* Set Node address */
 477        for (i = 0; i < 6; i++)
 478                dev->dev_addr[i] = db->srom[20 + i];
 479
 480        err = register_netdev (dev);
 481        if (err)
 482                goto err_out_unmap;
 483
 484        dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
 485                 ent->driver_data >> 16,
 486                 pci_name(pdev), dev->dev_addr, pdev->irq);
 487
 488        pci_set_master(pdev);
 489
 490        return 0;
 491
 492err_out_unmap:
 493        pci_iounmap(pdev, db->ioaddr);
 494err_out_free_buf:
 495        pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 496                            db->buf_pool_ptr, db->buf_pool_dma_ptr);
 497err_out_free_desc:
 498        pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
 499                            db->desc_pool_ptr, db->desc_pool_dma_ptr);
 500err_out_res:
 501        pci_release_regions(pdev);
 502err_out_disable:
 503        pci_disable_device(pdev);
 504err_out_free:
 505        free_netdev(dev);
 506
 507        return err;
 508}
 509
 510
 511static void dmfe_remove_one(struct pci_dev *pdev)
 512{
 513        struct net_device *dev = pci_get_drvdata(pdev);
 514        struct dmfe_board_info *db = netdev_priv(dev);
 515
 516        DMFE_DBUG(0, "dmfe_remove_one()", 0);
 517
 518        if (dev) {
 519
 520                unregister_netdev(dev);
 521                pci_iounmap(db->pdev, db->ioaddr);
 522                pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
 523                                        DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
 524                                        db->desc_pool_dma_ptr);
 525                pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 526                                        db->buf_pool_ptr, db->buf_pool_dma_ptr);
 527                pci_release_regions(pdev);
 528                free_netdev(dev);       /* free board information */
 529        }
 530
 531        DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
 532}
 533
 534
 535/*
 536 *      Open the interface.
 537 *      The interface is opened whenever "ifconfig" activates it.
 538 */
 539
 540static int dmfe_open(struct net_device *dev)
 541{
 542        struct dmfe_board_info *db = netdev_priv(dev);
 543        const int irq = db->pdev->irq;
 544        int ret;
 545
 546        DMFE_DBUG(0, "dmfe_open", 0);
 547
 548        ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
 549        if (ret)
 550                return ret;
 551
 552        /* system variable init */
 553        db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
 554        db->tx_packet_cnt = 0;
 555        db->tx_queue_cnt = 0;
 556        db->rx_avail_cnt = 0;
 557        db->wait_reset = 0;
 558
 559        db->first_in_callback = 0;
 560        db->NIC_capability = 0xf;       /* All capabilities */
 561        db->PHY_reg4 = 0x1e0;
 562
 563        /* CR6 operation mode decision */
 564        if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
 565                (db->chip_revision >= 0x30) ) {
 566                db->cr6_data |= DMFE_TXTH_256;
 567                db->cr0_data = CR0_DEFAULT;
 568                db->dm910x_chk_mode=4;          /* Enter the normal mode */
 569        } else {
 570                db->cr6_data |= CR6_SFT;        /* Store & Forward mode */
 571                db->cr0_data = 0;
 572                db->dm910x_chk_mode = 1;        /* Enter the check mode */
 573        }
 574
 575        /* Initialize DM910X board */
 576        dmfe_init_dm910x(dev);
 577
 578        /* Activate the system interface */
 579        netif_wake_queue(dev);
 580
 581        /* Set up and activate the timer */
 582        timer_setup(&db->timer, dmfe_timer, 0);
 583        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
 584        add_timer(&db->timer);
 585
 586        return 0;
 587}
 588
 589
 590/*      Initialize DM910X board
 591 *      Reset DM910X board
 592 *      Initialize TX/Rx descriptor chain structure
 593 *      Send the set-up frame
 594 *      Enable Tx/Rx machine
 595 */
 596
 597static void dmfe_init_dm910x(struct net_device *dev)
 598{
 599        struct dmfe_board_info *db = netdev_priv(dev);
 600        void __iomem *ioaddr = db->ioaddr;
 601
 602        DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
 603
 604        /* Reset DM910x MAC controller */
 605        dw32(DCR0, DM910X_RESET);       /* RESET MAC */
 606        udelay(100);
 607        dw32(DCR0, db->cr0_data);
 608        udelay(5);
 609
 610        /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
 611        db->phy_addr = 1;
 612
 613        /* Parse SROM and media mode */
 614        dmfe_parse_srom(db);
 615        db->media_mode = dmfe_media_mode;
 616
 617        /* RESET Phyxcer Chip by GPR port bit 7 */
 618        dw32(DCR12, 0x180);             /* Set GPR bit 7 as an output */
 619        if (db->chip_id == PCI_DM9009_ID) {
 620                dw32(DCR12, 0x80);      /* Issue RESET signal */
 621                mdelay(300);                    /* Delay 300 ms */
 622        }
 623        dw32(DCR12, 0x0);       /* Clear RESET signal */
 624
 625        /* Process Phyxcer Media Mode */
 626        if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
 627                dmfe_set_phyxcer(db);
 628
 629        /* Media Mode Process */
 630        if ( !(db->media_mode & DMFE_AUTO) )
 631                db->op_mode = db->media_mode;   /* Force Mode */
 632
 633        /* Initialize Transmit/Receive descriptor and CR3/4 */
 634        dmfe_descriptor_init(dev);
 635
 636        /* Init CR6 to program DM910x operation */
 637        update_cr6(db->cr6_data, ioaddr);
 638
 639        /* Send setup frame */
 640        if (db->chip_id == PCI_DM9132_ID)
 641                dm9132_id_table(dev);   /* DM9132 */
 642        else
 643                send_filter_frame(dev); /* DM9102/DM9102A */
 644
 645        /* Init CR7, interrupt active bit */
 646        db->cr7_data = CR7_DEFAULT;
 647        dw32(DCR7, db->cr7_data);
 648
 649        /* Init CR15, Tx jabber and Rx watchdog timer */
 650        dw32(DCR15, db->cr15_data);
 651
 652        /* Enable DM910X Tx/Rx function */
 653        db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
 654        update_cr6(db->cr6_data, ioaddr);
 655}
 656
 657
 658/*
 659 *      Hardware start transmission.
 660 *      Send a packet to media from the upper layer.
 661 */
 662
 663static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 664                                         struct net_device *dev)
 665{
 666        struct dmfe_board_info *db = netdev_priv(dev);
 667        void __iomem *ioaddr = db->ioaddr;
 668        struct tx_desc *txptr;
 669        unsigned long flags;
 670
 671        DMFE_DBUG(0, "dmfe_start_xmit", 0);
 672
 673        /* Too large packet check */
 674        if (skb->len > MAX_PACKET_SIZE) {
 675                pr_err("big packet = %d\n", (u16)skb->len);
 676                dev_kfree_skb_any(skb);
 677                return NETDEV_TX_OK;
 678        }
 679
 680        /* Resource flag check */
 681        netif_stop_queue(dev);
 682
 683        spin_lock_irqsave(&db->lock, flags);
 684
 685        /* No Tx resource check; it should never happen normally */
 686        if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
 687                spin_unlock_irqrestore(&db->lock, flags);
 688                pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
 689                return NETDEV_TX_BUSY;
 690        }
 691
 692        /* Disable NIC interrupt */
 693        dw32(DCR7, 0);
 694
 695        /* transmit this packet */
 696        txptr = db->tx_insert_ptr;
 697        skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
 698        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 699
 700        /* Point to the next free transmit descriptor */
 701        db->tx_insert_ptr = txptr->next_tx_desc;
 702
 703        /* Transmit Packet Process */
 704        if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
 705                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
 706                db->tx_packet_cnt++;                    /* Ready to send */
 707                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 708                netif_trans_update(dev);                /* saved time stamp */
 709        } else {
 710                db->tx_queue_cnt++;                     /* queue TX packet */
 711                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 712        }
 713
 714        /* Tx resource check */
 715        if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
 716                netif_wake_queue(dev);
 717
 718        /* Restore CR7 to enable interrupt */
 719        spin_unlock_irqrestore(&db->lock, flags);
 720        dw32(DCR7, db->cr7_data);
 721
 722        /* free this SKB */
 723        dev_consume_skb_any(skb);
 724
 725        return NETDEV_TX_OK;
 726}
 727
 728
 729/*
 730 *      Stop the interface.
 731 *      The interface is stopped when it is brought down.
 732 */
 733
 734static int dmfe_stop(struct net_device *dev)
 735{
 736        struct dmfe_board_info *db = netdev_priv(dev);
 737        void __iomem *ioaddr = db->ioaddr;
 738
 739        DMFE_DBUG(0, "dmfe_stop", 0);
 740
 741        /* disable system */
 742        netif_stop_queue(dev);
 743
 744        /* delete the timer */
 745        del_timer_sync(&db->timer);
 746
 747        /* Reset & stop DM910X board */
 748        dw32(DCR0, DM910X_RESET);
 749        udelay(100);
 750        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
 751
 752        /* free interrupt */
 753        free_irq(db->pdev->irq, dev);
 754
 755        /* free allocated rx buffer */
 756        dmfe_free_rxbuffer(db);
 757
 758#if 0
 759        /* show statistic counter */
 760        printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
 761               db->tx_fifo_underrun, db->tx_excessive_collision,
 762               db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
 763               db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
 764               db->reset_fatal, db->reset_TXtimeout);
 765#endif
 766
 767        return 0;
 768}
 769
 770
 771/*
 772 *      DM9102 interrupt handler
 773 *      Pass received packets to the upper layer and free transmitted packets
 774 */
 775
 776static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 777{
 778        struct net_device *dev = dev_id;
 779        struct dmfe_board_info *db = netdev_priv(dev);
 780        void __iomem *ioaddr = db->ioaddr;
 781        unsigned long flags;
 782
 783        DMFE_DBUG(0, "dmfe_interrupt()", 0);
 784
 785        spin_lock_irqsave(&db->lock, flags);
 786
 787        /* Got DM910X status */
 788        db->cr5_data = dr32(DCR5);
 789        dw32(DCR5, db->cr5_data);
 790        if ( !(db->cr5_data & 0xc1) ) {
 791                spin_unlock_irqrestore(&db->lock, flags);
 792                return IRQ_HANDLED;
 793        }
 794
 795        /* Disable all interrupts in CR7 to solve the interrupt edge problem */
 796        dw32(DCR7, 0);
 797
 798        /* Check system status */
 799        if (db->cr5_data & 0x2000) {
 800                /* system bus error happened */
 801                DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
 802                db->reset_fatal++;
 803                db->wait_reset = 1;     /* Need to RESET */
 804                spin_unlock_irqrestore(&db->lock, flags);
 805                return IRQ_HANDLED;
 806        }
 807
 808        /* Process received packets */
 809        if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
 810                dmfe_rx_packet(dev, db);
 811
 812        /* reallocate rx descriptor buffer */
 813        if (db->rx_avail_cnt<RX_DESC_CNT)
 814                allocate_rx_buffer(dev);
 815
 816        /* Free the transmitted descriptor */
 817        if ( db->cr5_data & 0x01)
 818                dmfe_free_tx_pkt(dev, db);
 819
 820        /* Mode Check */
 821        if (db->dm910x_chk_mode & 0x2) {
 822                db->dm910x_chk_mode = 0x4;
 823                db->cr6_data |= 0x100;
 824                update_cr6(db->cr6_data, ioaddr);
 825        }
 826
 827        /* Restore CR7 to enable interrupt mask */
 828        dw32(DCR7, db->cr7_data);
 829
 830        spin_unlock_irqrestore(&db->lock, flags);
 831        return IRQ_HANDLED;
 832}
 833
 834
 835#ifdef CONFIG_NET_POLL_CONTROLLER
 836/*
 837 * Polling 'interrupt' - used by things like netconsole to send skbs
 838 * without having to re-enable interrupts. It's not called while
 839 * the interrupt routine is executing.
 840 */
 841
 842static void poll_dmfe (struct net_device *dev)
 843{
 844        struct dmfe_board_info *db = netdev_priv(dev);
 845        const int irq = db->pdev->irq;
 846
 847        /* disable_irq here is not very nice, but with the lockless
 848           interrupt handler we have no other choice. */
 849        disable_irq(irq);
 850        dmfe_interrupt (irq, dev);
 851        enable_irq(irq);
 852}
 853#endif
 854
 855/*
 856 *      Free TX resource after TX complete
 857 */
 858
 859static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
 860{
 861        struct tx_desc *txptr;
 862        void __iomem *ioaddr = db->ioaddr;
 863        u32 tdes0;
 864
 865        txptr = db->tx_remove_ptr;
 866        while(db->tx_packet_cnt) {
 867                tdes0 = le32_to_cpu(txptr->tdes0);
 868                if (tdes0 & 0x80000000)
 869                        break;
 870
 871                /* A packet transmission completed */
 872                db->tx_packet_cnt--;
 873                dev->stats.tx_packets++;
 874
 875                /* Transmit statistic counter */
 876                if ( tdes0 != 0x7fffffff ) {
 877                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
 878                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
 879                        if (tdes0 & TDES0_ERR_MASK) {
 880                                dev->stats.tx_errors++;
 881
 882                                if (tdes0 & 0x0002) {   /* UnderRun */
 883                                        db->tx_fifo_underrun++;
 884                                        if ( !(db->cr6_data & CR6_SFT) ) {
 885                                                db->cr6_data = db->cr6_data | CR6_SFT;
 886                                                update_cr6(db->cr6_data, ioaddr);
 887                                        }
 888                                }
 889                                if (tdes0 & 0x0100)
 890                                        db->tx_excessive_collision++;
 891                                if (tdes0 & 0x0200)
 892                                        db->tx_late_collision++;
 893                                if (tdes0 & 0x0400)
 894                                        db->tx_no_carrier++;
 895                                if (tdes0 & 0x0800)
 896                                        db->tx_loss_carrier++;
 897                                if (tdes0 & 0x4000)
 898                                        db->tx_jabber_timeout++;
 899                        }
 900                }
 901
 902                txptr = txptr->next_tx_desc;
 903        }/* End of while */
 904
 905        /* Update TX remove pointer to next */
 906        db->tx_remove_ptr = txptr;
 907
 908        /* Send the Tx packet in queue */
 909        if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
 910                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
 911                db->tx_packet_cnt++;                    /* Ready to send */
 912                db->tx_queue_cnt--;
 913                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 914                netif_trans_update(dev);                /* saved time stamp */
 915        }
 916
 917        /* Resource available check */
 918        if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
 919                netif_wake_queue(dev);  /* Active upper layer, send again */
 920}
 921
 922
 923/*
 924 *      Calculate the CRC value of the Rx packet
 925 *      flag =  1 : return the reverse CRC (for the received packet CRC)
 926 *              0 : return the normal CRC (for Hash Table index)
 927 */
 928
 929static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
 930{
 931        u32 crc = crc32(~0, Data, Len);
 932        if (flag) crc = ~crc;
 933        return crc;
 934}
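/*
 * cal_CRC() simply wraps the kernel's crc32() from <linux/crc32.h>,
 * seeded with ~0.  Both callers appear later in this file:
 * dmfe_rx_packet() uses flag = 1 and compares the inverted CRC against
 * the 32-bit value trailing the received frame when dm910x_chk_mode
 * requests it, and dm9132_id_table() uses flag = 0, keeping the low six
 * bits as the multicast hash-table index.
 */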
 935
 936
 937/*
 938 *      Receive incoming packets and pass them to the upper layer
 939 */
 940
 941static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
 942{
 943        struct rx_desc *rxptr;
 944        struct sk_buff *skb, *newskb;
 945        int rxlen;
 946        u32 rdes0;
 947
 948        rxptr = db->rx_ready_ptr;
 949
 950        while(db->rx_avail_cnt) {
 951                rdes0 = le32_to_cpu(rxptr->rdes0);
 952                if (rdes0 & 0x80000000) /* packet owner check */
 953                        break;
 954
 955                db->rx_avail_cnt--;
 956                db->interval_rx_cnt++;
 957
 958                pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
 959                                 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
 960
 961                if ( (rdes0 & 0x300) != 0x300) {
 962                        /* A packet without First/Last flag */
 963                        /* reuse this SKB */
 964                        DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
 965                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 966                } else {
 967                        /* A packet with First/Last flag */
 968                        rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
 969
 970                        /* error summary bit check */
 971                        if (rdes0 & 0x8000) {
 972                                /* This is an error packet */
 973                                dev->stats.rx_errors++;
 974                                if (rdes0 & 1)
 975                                        dev->stats.rx_fifo_errors++;
 976                                if (rdes0 & 2)
 977                                        dev->stats.rx_crc_errors++;
 978                                if (rdes0 & 0x80)
 979                                        dev->stats.rx_length_errors++;
 980                        }
 981
 982                        if ( !(rdes0 & 0x8000) ||
 983                                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
 984                                skb = rxptr->rx_skb_ptr;
 985
 986                                /* Check the received packet CRC if required */
 987                                if ( (db->dm910x_chk_mode & 1) &&
 988                                        (cal_CRC(skb->data, rxlen, 1) !=
 989                                        (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
 990                                        /* Found an erroneous received packet */
 991                                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 992                                        db->dm910x_chk_mode = 3;
 993                                } else {
 994                                        /* Good packet, send to upper layer */
 995                                        /* Short packets are copied into a new SKB */
 996                                        if ((rxlen < RX_COPY_SIZE) &&
 997                                                ((newskb = netdev_alloc_skb(dev, rxlen + 2))
 998                                                != NULL)) {
 999
1000                                                skb = newskb;
1001                                                /* size less than COPY_SIZE, allocate a rxlen SKB */
1002                                                skb_reserve(skb, 2); /* 16byte align */
1003                                                skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1004                                                          skb_put(skb, rxlen),
1005                                                                          rxlen);
1006                                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1007                                        } else
1008                                                skb_put(skb, rxlen);
1009
1010                                        skb->protocol = eth_type_trans(skb, dev);
1011                                        netif_rx(skb);
1012                                        dev->stats.rx_packets++;
1013                                        dev->stats.rx_bytes += rxlen;
1014                                }
1015                        } else {
1016                                /* Reuse the SKB when the packet has errors */
1017                                DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1018                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1019                        }
1020                }
1021
1022                rxptr = rxptr->next_rx_desc;
1023        }
1024
1025        db->rx_ready_ptr = rxptr;
1026}
1027
1028/*
1029 * Set DM910X multicast address
1030 */
1031
1032static void dmfe_set_filter_mode(struct net_device *dev)
1033{
1034        struct dmfe_board_info *db = netdev_priv(dev);
1035        unsigned long flags;
1036        int mc_count = netdev_mc_count(dev);
1037
1038        DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1039        spin_lock_irqsave(&db->lock, flags);
1040
1041        if (dev->flags & IFF_PROMISC) {
1042                DMFE_DBUG(0, "Enable PROM Mode", 0);
1043                db->cr6_data |= CR6_PM | CR6_PBF;
1044                update_cr6(db->cr6_data, db->ioaddr);
1045                spin_unlock_irqrestore(&db->lock, flags);
1046                return;
1047        }
1048
1049        if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1050                DMFE_DBUG(0, "Pass all multicast address", mc_count);
1051                db->cr6_data &= ~(CR6_PM | CR6_PBF);
1052                db->cr6_data |= CR6_PAM;
1053                spin_unlock_irqrestore(&db->lock, flags);
1054                return;
1055        }
1056
1057        DMFE_DBUG(0, "Set multicast address", mc_count);
1058        if (db->chip_id == PCI_DM9132_ID)
1059                dm9132_id_table(dev);   /* DM9132 */
1060        else
1061                send_filter_frame(dev); /* DM9102/DM9102A */
1062        spin_unlock_irqrestore(&db->lock, flags);
1063}
1064
1065/*
1066 *      Ethtool interface
1067 */
1068
1069static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1070                               struct ethtool_drvinfo *info)
1071{
1072        struct dmfe_board_info *np = netdev_priv(dev);
1073
1074        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1075        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1076}
1077
1078static int dmfe_ethtool_set_wol(struct net_device *dev,
1079                                struct ethtool_wolinfo *wolinfo)
1080{
1081        struct dmfe_board_info *db = netdev_priv(dev);
1082
1083        if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1084                                WAKE_ARP | WAKE_MAGICSECURE))
1085                   return -EOPNOTSUPP;
1086
1087        db->wol_mode = wolinfo->wolopts;
1088        return 0;
1089}
1090
1091static void dmfe_ethtool_get_wol(struct net_device *dev,
1092                                 struct ethtool_wolinfo *wolinfo)
1093{
1094        struct dmfe_board_info *db = netdev_priv(dev);
1095
1096        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1097        wolinfo->wolopts = db->wol_mode;
1098}
1099
1100
1101static const struct ethtool_ops netdev_ethtool_ops = {
1102        .get_drvinfo            = dmfe_ethtool_get_drvinfo,
1103        .get_link               = ethtool_op_get_link,
1104        .set_wol                = dmfe_ethtool_set_wol,
1105        .get_wol                = dmfe_ethtool_get_wol,
1106};
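/*
 * With these ethtool_ops wired up, the usual userspace tooling works
 * against the driver; for instance (illustrative, assuming the interface
 * is named eth0) "ethtool eth0" reports the link state via
 * ethtool_op_get_link(), and "ethtool -s eth0 wol g" arms magic-packet
 * wake-up, which dmfe_ethtool_set_wol() records in db->wol_mode.
 */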
1107
1108/*
1109 *      A periodic timer routine
1110 *      Dynamic media sense, allocate Rx buffer...
1111 */
1112
1113static void dmfe_timer(struct timer_list *t)
1114{
1115        struct dmfe_board_info *db = from_timer(db, t, timer);
1116        struct net_device *dev = pci_get_drvdata(db->pdev);
1117        void __iomem *ioaddr = db->ioaddr;
1118        u32 tmp_cr8;
1119        unsigned char tmp_cr12;
1120        unsigned long flags;
1121
1122        int link_ok, link_ok_phy;
1123
1124        DMFE_DBUG(0, "dmfe_timer()", 0);
1125        spin_lock_irqsave(&db->lock, flags);
1126
1127        /* Media mode handling when link was OK before entering this routine */
1128        if (db->first_in_callback == 0) {
1129                db->first_in_callback = 1;
1130                if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1131                        db->cr6_data &= ~0x40000;
1132                        update_cr6(db->cr6_data, ioaddr);
1133                        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1134                        db->cr6_data |= 0x40000;
1135                        update_cr6(db->cr6_data, ioaddr);
1136                        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1137                        add_timer(&db->timer);
1138                        spin_unlock_irqrestore(&db->lock, flags);
1139                        return;
1140                }
1141        }
1142
1143
1144        /* Operating Mode Check */
1145        if ( (db->dm910x_chk_mode & 0x1) &&
1146                (dev->stats.rx_packets > MAX_CHECK_PACKET) )
1147                db->dm910x_chk_mode = 0x4;
1148
1149        /* Dynamic reset DM910X : system error or transmit time-out */
1150        tmp_cr8 = dr32(DCR8);
1151        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1152                db->reset_cr8++;
1153                db->wait_reset = 1;
1154        }
1155        db->interval_rx_cnt = 0;
1156
1157        /* TX polling kick monitor */
1158        if ( db->tx_packet_cnt &&
1159             time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1160                dw32(DCR1, 0x1);   /* Tx polling again */
1161
1162                /* TX Timeout */
1163                if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1164                        db->reset_TXtimeout++;
1165                        db->wait_reset = 1;
1166                        dev_warn(&dev->dev, "Tx timeout - resetting\n");
1167                }
1168        }
1169
1170        if (db->wait_reset) {
1171                DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1172                db->reset_count++;
1173                dmfe_dynamic_reset(dev);
1174                db->first_in_callback = 0;
1175                db->timer.expires = DMFE_TIMER_WUT;
1176                add_timer(&db->timer);
1177                spin_unlock_irqrestore(&db->lock, flags);
1178                return;
1179        }
1180
1181        /* Link status check, Dynamic media type change */
1182        if (db->chip_id == PCI_DM9132_ID)
1183                tmp_cr12 = dr8(DCR9 + 3);       /* DM9132 */
1184        else
1185                tmp_cr12 = dr8(DCR12);          /* DM9102/DM9102A */
1186
1187        if ( ((db->chip_id == PCI_DM9102_ID) &&
1188                (db->chip_revision == 0x30)) ||
1189                ((db->chip_id == PCI_DM9132_ID) &&
1190                (db->chip_revision == 0x10)) ) {
1191                /* DM9102A Chip */
1192                if (tmp_cr12 & 2)
1193                        link_ok = 0;
1194                else
1195                        link_ok = 1;
1196        }
1197        else
1198                /* 0x43 is used instead of 0x3 because bit 6 should represent
1199                        link status of external PHY */
1200                link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1201
1202
1203        /* If the chip reports that the link failed, it could be because the
1204                external PHY link status pin is not connected correctly to the
1205                chip.  To be sure, ask the PHY too.
1206        */
1207
1208        /* need a dummy read because of PHY's register latch */
1209        dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1210        link_ok_phy = (dmfe_phy_read (db->ioaddr,
1211                                      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1212
1213        if (link_ok_phy != link_ok) {
1214                DMFE_DBUG (0, "PHY and chip report different link status", 0);
1215                link_ok = link_ok | link_ok_phy;
1216        }
1217
1218        if ( !link_ok && netif_carrier_ok(dev)) {
1219                /* Link Failed */
1220                DMFE_DBUG(0, "Link Failed", tmp_cr12);
1221                netif_carrier_off(dev);
1222
1223                /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1224                /* Not needed in AUTO or forced 1M HomeRun/LongRun modes */
1225                if ( !(db->media_mode & 0x38) )
1226                        dmfe_phy_write(db->ioaddr, db->phy_addr,
1227                                       0, 0x1000, db->chip_id);
1228
1229                /* AUTO mode, if INT phyxcer link failed, select EXT device */
1230                if (db->media_mode & DMFE_AUTO) {
1231                        /* 10/100M link failed, use 1M Home-Net */
1232                        db->cr6_data|=0x00040000;       /* bit18=1, MII */
1233                        db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
1234                        update_cr6(db->cr6_data, ioaddr);
1235                }
1236        } else if (!netif_carrier_ok(dev)) {
1237
1238                DMFE_DBUG(0, "Link OK", tmp_cr12);
1239
1240                /* Auto Sense Speed */
1241                if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1242                        netif_carrier_on(dev);
1243                        SHOW_MEDIA_TYPE(db->op_mode);
1244                }
1245
1246                dmfe_process_mode(db);
1247        }
1248
1249        /* HPNA remote command check */
1250        if (db->HPNA_command & 0xf00) {
1251                db->HPNA_timer--;
1252                if (!db->HPNA_timer)
1253                        dmfe_HPNA_remote_cmd_chk(db);
1254        }
1255
1256        /* Timer active again */
1257        db->timer.expires = DMFE_TIMER_WUT;
1258        add_timer(&db->timer);
1259        spin_unlock_irqrestore(&db->lock, flags);
1260}
1261
1262
1263/*
1264 *      Dynamic reset the DM910X board
1265 *      Stop DM910X board
1266 *      Free Tx/Rx allocated memory
1267 *      Reset DM910X board
1268 *      Re-initialize DM910X board
1269 */
1270
1271static void dmfe_dynamic_reset(struct net_device *dev)
1272{
1273        struct dmfe_board_info *db = netdev_priv(dev);
1274        void __iomem *ioaddr = db->ioaddr;
1275
1276        DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1277
1278        /* Stop MAC controller */
1279        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1280        update_cr6(db->cr6_data, ioaddr);
1281        dw32(DCR7, 0);                          /* Disable Interrupt */
1282        dw32(DCR5, dr32(DCR5));
1283
1284        /* Disable upper layer interface */
1285        netif_stop_queue(dev);
1286
1287        /* Free Rx Allocate buffer */
1288        dmfe_free_rxbuffer(db);
1289
1290        /* system variable init */
1291        db->tx_packet_cnt = 0;
1292        db->tx_queue_cnt = 0;
1293        db->rx_avail_cnt = 0;
1294        netif_carrier_off(dev);
1295        db->wait_reset = 0;
1296
1297        /* Re-initialize DM910X board */
1298        dmfe_init_dm910x(dev);
1299
1300        /* Restart upper layer interface */
1301        netif_wake_queue(dev);
1302}
1303
1304
1305/*
1306 *      free all allocated rx buffer
1307 */
1308
1309static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1310{
1311        DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1312
1313        /* free allocated rx buffer */
1314        while (db->rx_avail_cnt) {
1315                dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1316                db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1317                db->rx_avail_cnt--;
1318        }
1319}
1320
1321
1322/*
1323 *      Reuse the SK buffer
1324 */
1325
1326static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1327{
1328        struct rx_desc *rxptr = db->rx_insert_ptr;
1329
1330        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1331                rxptr->rx_skb_ptr = skb;
1332                rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1333                            skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1334                wmb();
1335                rxptr->rdes0 = cpu_to_le32(0x80000000);
1336                db->rx_avail_cnt++;
1337                db->rx_insert_ptr = rxptr->next_rx_desc;
1338        } else
1339                DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1340}
1341
1342
1343/*
1344 *      Initialize Transmit/Receive descriptors
1345 *      using a chain structure, and allocate Tx/Rx buffers
1346 */
1347
1348static void dmfe_descriptor_init(struct net_device *dev)
1349{
1350        struct dmfe_board_info *db = netdev_priv(dev);
1351        void __iomem *ioaddr = db->ioaddr;
1352        struct tx_desc *tmp_tx;
1353        struct rx_desc *tmp_rx;
1354        unsigned char *tmp_buf;
1355        dma_addr_t tmp_tx_dma, tmp_rx_dma;
1356        dma_addr_t tmp_buf_dma;
1357        int i;
1358
1359        DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1360
1361        /* tx descriptor start pointer */
1362        db->tx_insert_ptr = db->first_tx_desc;
1363        db->tx_remove_ptr = db->first_tx_desc;
1364        dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
1365
1366        /* rx descriptor start pointer */
1367        db->first_rx_desc = (void *)db->first_tx_desc +
1368                        sizeof(struct tx_desc) * TX_DESC_CNT;
1369
1370        db->first_rx_desc_dma =  db->first_tx_desc_dma +
1371                        sizeof(struct tx_desc) * TX_DESC_CNT;
1372        db->rx_insert_ptr = db->first_rx_desc;
1373        db->rx_ready_ptr = db->first_rx_desc;
1374        dw32(DCR3, db->first_rx_desc_dma);              /* RX DESC address */
1375
1376        /* Init Transmit chain */
1377        tmp_buf = db->buf_pool_start;
1378        tmp_buf_dma = db->buf_pool_dma_start;
1379        tmp_tx_dma = db->first_tx_desc_dma;
1380        for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1381                tmp_tx->tx_buf_ptr = tmp_buf;
1382                tmp_tx->tdes0 = cpu_to_le32(0);
1383                tmp_tx->tdes1 = cpu_to_le32(0x81000000);        /* IC, chain */
1384                tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1385                tmp_tx_dma += sizeof(struct tx_desc);
1386                tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1387                tmp_tx->next_tx_desc = tmp_tx + 1;
1388                tmp_buf = tmp_buf + TX_BUF_ALLOC;
1389                tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1390        }
1391        (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1392        tmp_tx->next_tx_desc = db->first_tx_desc;
1393
1394         /* Init Receive descriptor chain */
1395        tmp_rx_dma=db->first_rx_desc_dma;
1396        for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1397                tmp_rx->rdes0 = cpu_to_le32(0);
1398                tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1399                tmp_rx_dma += sizeof(struct rx_desc);
1400                tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1401                tmp_rx->next_rx_desc = tmp_rx + 1;
1402        }
1403        (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1404        tmp_rx->next_rx_desc = db->first_rx_desc;
1405
1406        /* pre-allocate Rx buffer */
1407        allocate_rx_buffer(dev);
1408}
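/*
 * Resulting layout (sketch): the single coherent block allocated in
 * dmfe_init_one() holds TX_DESC_CNT transmit descriptors immediately
 * followed by RX_DESC_CNT receive descriptors.  Each descriptor's
 * tdes3/rdes3 word holds the bus address of the next descriptor and
 * next_tx_desc/next_rx_desc the matching virtual pointer, with the last
 * entry chained back to the first, so the chip only needs the ring bases
 * written to CR4 (TX) and CR3 (RX) above.
 */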
1409
1410
1411/*
1412 *      Update CR6 value
1413 *      First stop the DM910X, then write the value and restart
1414 */
1415
1416static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1417{
1418        u32 cr6_tmp;
1419
1420        cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1421        dw32(DCR6, cr6_tmp);
1422        udelay(5);
1423        dw32(DCR6, cr6_data);
1424        udelay(5);
1425}
1426
1427
1428/*
1429 *      Send a setup frame for DM9132
1430 *      This setup frame initializes the DM910X address filter mode
1431 */
1432
1433static void dm9132_id_table(struct net_device *dev)
1434{
1435        struct dmfe_board_info *db = netdev_priv(dev);
1436        void __iomem *ioaddr = db->ioaddr + 0xc0;
1437        u16 *addrptr = (u16 *)dev->dev_addr;
1438        struct netdev_hw_addr *ha;
1439        u16 i, hash_table[4];
1440
1441        /* Node address */
1442        for (i = 0; i < 3; i++) {
1443                dw16(0, addrptr[i]);
1444                ioaddr += 4;
1445        }
1446
1447        /* Clear Hash Table */
1448        memset(hash_table, 0, sizeof(hash_table));
1449
1450        /* broadcast address */
1451        hash_table[3] = 0x8000;
1452
1453        /* the multicast address in Hash Table : 64 bits */
1454        netdev_for_each_mc_addr(ha, dev) {
1455                u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1456
1457                hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1458        }
1459
1460        /* Write the hash table to MAC MD table */
1461        for (i = 0; i < 4; i++, ioaddr += 4)
1462                dw16(0, hash_table[i]);
1463}
1464
1465
1466/*
1467 *      Send a setup frame for DM9102/DM9102A
1468 *      This setup frame initializes the DM910X address filter mode
1469 */
1470
1471static void send_filter_frame(struct net_device *dev)
1472{
1473        struct dmfe_board_info *db = netdev_priv(dev);
1474        struct netdev_hw_addr *ha;
1475        struct tx_desc *txptr;
1476        u16 *addrptr;
1477        u32 *suptr;
1478        int i;
1479
1480        DMFE_DBUG(0, "send_filter_frame()", 0);
1481
1482        txptr = db->tx_insert_ptr;
1483        suptr = (u32 *) txptr->tx_buf_ptr;
1484
1485        /* Node address */
1486        addrptr = (u16 *) dev->dev_addr;
1487        *suptr++ = addrptr[0];
1488        *suptr++ = addrptr[1];
1489        *suptr++ = addrptr[2];
1490
1491        /* broadcast address */
1492        *suptr++ = 0xffff;
1493        *suptr++ = 0xffff;
1494        *suptr++ = 0xffff;
1495
1496        /* fill in the multicast addresses */
1497        netdev_for_each_mc_addr(ha, dev) {
1498                addrptr = (u16 *) ha->addr;
1499                *suptr++ = addrptr[0];
1500                *suptr++ = addrptr[1];
1501                *suptr++ = addrptr[2];
1502        }
1503
1504        for (i = netdev_mc_count(dev); i < 14; i++) {
1505                *suptr++ = 0xffff;
1506                *suptr++ = 0xffff;
1507                *suptr++ = 0xffff;
1508        }
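        /*
         * The buffer now holds a 192-byte setup frame: 16 perfect-filter
         * entries of three 16-bit words each, stored in the low half of
         * consecutive 32-bit slots (node address, broadcast, multicast
         * entries, padded with broadcast). The 0xc0 in the tdes1 value
         * written below is this 192-byte length.
         */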
1509
1510        /* prepare the setup frame */
1511        db->tx_insert_ptr = txptr->next_tx_desc;
1512        txptr->tdes1 = cpu_to_le32(0x890000c0);
1513
1514        /* Resource Check and Send the setup packet */
1515        if (!db->tx_packet_cnt) {
1516                void __iomem *ioaddr = db->ioaddr;
1517
1518                /* Resource Empty */
1519                db->tx_packet_cnt++;
1520                txptr->tdes0 = cpu_to_le32(0x80000000);
1521                update_cr6(db->cr6_data | 0x2000, ioaddr);
1522                dw32(DCR1, 0x1);        /* Issue Tx polling */
1523                update_cr6(db->cr6_data, ioaddr);
1524                netif_trans_update(dev);
1525        } else
1526                db->tx_queue_cnt++;     /* Put in TX queue */
1527}
1528
1529
1530/*
1531 *      Allocate rx buffers,
1532 *      refilling as many Rx descriptors as possible
1533 */
1534
1535static void allocate_rx_buffer(struct net_device *dev)
1536{
1537        struct dmfe_board_info *db = netdev_priv(dev);
1538        struct rx_desc *rxptr;
1539        struct sk_buff *skb;
1540
1541        rxptr = db->rx_insert_ptr;
1542
1543        while (db->rx_avail_cnt < RX_DESC_CNT) {
1544                if ((skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE)) == NULL)
1545                        break;
1546                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1547                rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, skb->data,
1548                                    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
1549                wmb();
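                /*
                 * Hand the descriptor back to the chip: bit 31 of rdes0 is
                 * the ownership bit, and the wmb() above makes sure the new
                 * buffer address in rdes2 is visible to the device first.
                 */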
1550                rxptr->rdes0 = cpu_to_le32(0x80000000);
1551                rxptr = rxptr->next_rx_desc;
1552                db->rx_avail_cnt++;
1553        }
1554
1555        db->rx_insert_ptr = rxptr;
1556}
1557
1558static void srom_clk_write(void __iomem *ioaddr, u32 data)
1559{
1560        static const u32 cmd[] = {
1561                CR9_SROM_READ | CR9_SRCS,
1562                CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1563                CR9_SROM_READ | CR9_SRCS
1564        };
1565        int i;
1566
1567        for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1568                dw32(DCR9, data | cmd[i]);
1569                udelay(5);
1570        }
1571}
1572
1573/*
1574 *      Read one 16-bit word of data from the serial ROM
1575 */
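/*
 *      The bit-banging below matches a 93C46-style serial EEPROM: assert
 *      chip select, clock out the read opcode (110b) and a 6-bit word
 *      address, then clock in 16 data bits MSB first. (Inferred from the
 *      code; the exact EEPROM part is not named here.)
 */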
1576static u16 read_srom_word(void __iomem *ioaddr, int offset)
1577{
1578        u16 srom_data;
1579        int i;
1580
1581        dw32(DCR9, CR9_SROM_READ);
1582        udelay(5);
1583        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1584        udelay(5);
1585
1586        /* Send the Read Command 110b */
1587        srom_clk_write(ioaddr, SROM_DATA_1);
1588        srom_clk_write(ioaddr, SROM_DATA_1);
1589        srom_clk_write(ioaddr, SROM_DATA_0);
1590
1591        /* Send the offset */
1592        for (i = 5; i >= 0; i--) {
1593                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1594                srom_clk_write(ioaddr, srom_data);
1595        }
1596
1597        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1598        udelay(5);
1599
1600        for (i = 16; i > 0; i--) {
1601                dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1602                udelay(5);
1603                srom_data = (srom_data << 1) |
1604                                ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1605                dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1606                udelay(5);
1607        }
1608
1609        dw32(DCR9, CR9_SROM_READ);
1610        udelay(5);
1611        return srom_data;
1612}
1613
1614
1615/*
1616 *      Auto-sense the media mode
1617 */
1618
1619static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1620{
1621        void __iomem *ioaddr = db->ioaddr;
1622        u8 ErrFlag = 0;
1623        u16 phy_mode;
1624
1625        /* CR6 bit18=0, select 10/100M */
1626        update_cr6(db->cr6_data & ~0x40000, ioaddr);
1627
1628        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1629        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
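        /*
         * Register 1 is the MII BMSR; its link bit latches low, so reading
         * it twice yields the current state. 0x24 checks both
         * "auto-negotiation complete" and "link up".
         */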
1630
1631        if ( (phy_mode & 0x24) == 0x24 ) {
1632                if (db->chip_id == PCI_DM9132_ID)       /* DM9132 */
1633                        phy_mode = dmfe_phy_read(db->ioaddr,
1634                                                 db->phy_addr, 7, db->chip_id) & 0xf000;
1635                else                            /* DM9102/DM9102A */
1636                        phy_mode = dmfe_phy_read(db->ioaddr,
1637                                                 db->phy_addr, 17, db->chip_id) & 0xf000;
1638                switch (phy_mode) {
1639                case 0x1000: db->op_mode = DMFE_10MHF; break;
1640                case 0x2000: db->op_mode = DMFE_10MFD; break;
1641                case 0x4000: db->op_mode = DMFE_100MHF; break;
1642                case 0x8000: db->op_mode = DMFE_100MFD; break;
1643                default: db->op_mode = DMFE_10MHF;
1644                        ErrFlag = 1;
1645                        break;
1646                }
1647        } else {
1648                db->op_mode = DMFE_10MHF;
1649                DMFE_DBUG(0, "Link Failed :", phy_mode);
1650                ErrFlag = 1;
1651        }
1652
1653        return ErrFlag;
1654}
1655
1656
1657/*
1658 *      Set 10/100 phyxcer (PHY transceiver) capability
1659 *      AUTO mode : phyxcer register 4 advertises the NIC capabilities
1660 *      Force mode: phyxcer register 4 advertises only the forced media
1661 */
1662
1663static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1664{
1665        void __iomem *ioaddr = db->ioaddr;
1666        u16 phy_reg;
1667
1668        /* Select 10/100M phyxcer */
1669        db->cr6_data &= ~0x40000;
1670        update_cr6(db->cr6_data, ioaddr);
1671
1672        /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1673        if (db->chip_id == PCI_DM9009_ID) {
1674                phy_reg = dmfe_phy_read(db->ioaddr,
1675                                        db->phy_addr, 18, db->chip_id) & ~0x1000;
1676
1677                dmfe_phy_write(db->ioaddr,
1678                               db->phy_addr, 18, phy_reg, db->chip_id);
1679        }
1680
1681        /* Phyxcer capability setting */
1682        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1683
1684        if (db->media_mode & DMFE_AUTO) {
1685                /* AUTO Mode */
1686                phy_reg |= db->PHY_reg4;
1687        } else {
1688                /* Force Mode */
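                /*
                 * Standard MII register 4 (ANAR) ability bits: 0x20 = 10M
                 * half, 0x40 = 10M full, 0x80 = 100M half, 0x100 = 100M full.
                 */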
1689                switch(db->media_mode) {
1690                case DMFE_10MHF: phy_reg |= 0x20; break;
1691                case DMFE_10MFD: phy_reg |= 0x40; break;
1692                case DMFE_100MHF: phy_reg |= 0x80; break;
1693                case DMFE_100MFD: phy_reg |= 0x100; break;
1694                }
1695                if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1696        }
1697
1698        /* Write new capability to Phyxcer Reg4 */
1699        if ( !(phy_reg & 0x01e0)) {
1700                phy_reg|=db->PHY_reg4;
1701                db->media_mode|=DMFE_AUTO;
1702        }
1703        dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1704
1705        /* Restart Auto-Negotiation */
1706        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1707                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1708        if ( !db->chip_type )
1709                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1710}
1711
1712
1713/*
1714 *      Process op-mode
1715 *      AUTO mode : PHY controller in Auto-negotiation Mode
1716 *      Force mode: PHY controller in force mode with a HUB,
1717 *                      or N-way forced capability with a SWITCH
1718 */
1719
1720static void dmfe_process_mode(struct dmfe_board_info *db)
1721{
1722        u16 phy_reg;
1723
1724        /* Full Duplex Mode Check */
1725        if (db->op_mode & 0x4)
1726                db->cr6_data |= CR6_FDM;        /* Set Full Duplex Bit */
1727        else
1728                db->cr6_data &= ~CR6_FDM;       /* Clear Full Duplex Bit */
1729
1730        /* Transceiver Selection */
1731        if (db->op_mode & 0x10)         /* 1M HomePNA */
1732                db->cr6_data |= 0x40000;/* External MII select */
1733        else
1734                db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1735
1736        update_cr6(db->cr6_data, db->ioaddr);
1737
1738        /* 10/100M phyxcer force mode needed */
1739        if ( !(db->media_mode & 0x18)) {
1740                /* Force Mode */
1741                phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1742                if ( !(phy_reg & 0x1) ) {
1743                        /* partner without N-Way capability */
1744                        phy_reg = 0x0;
1745                        switch(db->op_mode) {
1746                        case DMFE_10MHF: phy_reg = 0x0; break;
1747                        case DMFE_10MFD: phy_reg = 0x100; break;
1748                        case DMFE_100MHF: phy_reg = 0x2000; break;
1749                        case DMFE_100MFD: phy_reg = 0x2100; break;
1750                        }
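                        /*
                         * phy_reg now holds a standard BMCR value: bit 13
                         * selects 100 Mb/s and bit 8 full duplex, e.g.
                         * 0x2100 = 100 Mb/s full duplex.
                         */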
1751                        dmfe_phy_write(db->ioaddr,
1752                                       db->phy_addr, 0, phy_reg, db->chip_id);
1753                        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1754                                mdelay(20);
1755                        dmfe_phy_write(db->ioaddr,
1756                                       db->phy_addr, 0, phy_reg, db->chip_id);
1757                }
1758        }
1759}
1760
1761
1762/*
1763 *      Write a word to Phy register
1764 */
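/*
 *      For the DM9102/DM9102A the MII management frame is bit-banged via
 *      CR9: a preamble of 1s, start (01), opcode (01 = write, 10 = read),
 *      5-bit PHY address, 5-bit register address, a turnaround, then 16
 *      data bits MSB first. The DM9132 instead maps the PHY registers
 *      directly into I/O space (offset 0x80 + reg * 4).
 */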
1765
1766static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1767                           u16 phy_data, u32 chip_id)
1768{
1769        u16 i;
1770
1771        if (chip_id == PCI_DM9132_ID) {
1772                dw16(0x80 + offset * 4, phy_data);
1773        } else {
1774                /* DM9102/DM9102A Chip */
1775
1776                /* Send 35 synchronization clocks (preamble) to the PHY controller */
1777                for (i = 0; i < 35; i++)
1778                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1779
1780                /* Send start command(01) to Phy */
1781                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1782                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1783
1784                /* Send write command(01) to Phy */
1785                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1786                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1787
1788                /* Send Phy address */
1789                for (i = 0x10; i > 0; i = i >> 1)
1790                        dmfe_phy_write_1bit(ioaddr,
1791                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1792
1793                /* Send register address */
1794                for (i = 0x10; i > 0; i = i >> 1)
1795                        dmfe_phy_write_1bit(ioaddr,
1796                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1797
1798                /* Write turnaround (10) */
1799                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1800                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1801
1802                /* Write a word data to PHY controller */
1803                for ( i = 0x8000; i > 0; i >>= 1)
1804                        dmfe_phy_write_1bit(ioaddr,
1805                                            phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1806        }
1807}
1808
1809
1810/*
1811 *      Read a word of data from a PHY register
1812 */
1813
1814static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1815{
1816        int i;
1817        u16 phy_data;
1818
1819        if (chip_id == PCI_DM9132_ID) {
1820                /* DM9132 Chip */
1821                phy_data = dr16(0x80 + offset * 4);
1822        } else {
1823                /* DM9102/DM9102A Chip */
1824
1825                /* Send 35 synchronization clocks (preamble) to the PHY controller */
1826                for (i = 0; i < 35; i++)
1827                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1828
1829                /* Send start command(01) to Phy */
1830                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1831                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1832
1833                /* Send read command(10) to Phy */
1834                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1835                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1836
1837                /* Send Phy address */
1838                for (i = 0x10; i > 0; i = i >> 1)
1839                        dmfe_phy_write_1bit(ioaddr,
1840                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1841
1842                /* Send register address */
1843                for (i = 0x10; i > 0; i = i >> 1)
1844                        dmfe_phy_write_1bit(ioaddr,
1845                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1846
1847                /* Skip transition state */
1848                dmfe_phy_read_1bit(ioaddr);
1849
1850                /* read 16bit data */
1851                for (phy_data = 0, i = 0; i < 16; i++) {
1852                        phy_data <<= 1;
1853                        phy_data |= dmfe_phy_read_1bit(ioaddr);
1854                }
1855        }
1856
1857        return phy_data;
1858}
1859
1860
1861/*
1862 *      Write one data bit to the PHY controller
1863 */
1864
1865static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1866{
1867        dw32(DCR9, phy_data);           /* MII Clock Low */
1868        udelay(1);
1869        dw32(DCR9, phy_data | MDCLKH);  /* MII Clock High */
1870        udelay(1);
1871        dw32(DCR9, phy_data);           /* MII Clock Low */
1872        udelay(1);
1873}
1874
1875
1876/*
1877 *      Read one data bit from the PHY controller
1878 */
1879
1880static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1881{
1882        u16 phy_data;
1883
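        /*
         * Presumably 0x40000 puts CR9 into MII read mode: the 0x10000
         * difference between the two writes toggles the MDC clock
         * (MDCLKH), and bit 19 is the MDIO data sampled below.
         */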
1884        dw32(DCR9, 0x50000);
1885        udelay(1);
1886        phy_data = (dr32(DCR9) >> 19) & 0x1;
1887        dw32(DCR9, 0x40000);
1888        udelay(1);
1889
1890        return phy_data;
1891}
1892
1893
1894/*
1895 *      Parse the SROM and set the media mode
1896 */
1897
1898static void dmfe_parse_srom(struct dmfe_board_info * db)
1899{
1900        char *srom = db->srom;
1901        int dmfe_mode, tmp_reg;
1902
1903        DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1904
1905        /* Init CR15 */
1906        db->cr15_data = CR15_DEFAULT;
1907
1908        /* Check SROM Version */
1909        if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1910                /* SROM V4.01 */
1911                /* Get NIC support media mode */
1912                db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1913                db->PHY_reg4 = 0;
1914                for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1915                        switch( db->NIC_capability & tmp_reg ) {
1916                        case 0x1: db->PHY_reg4 |= 0x0020; break;
1917                        case 0x2: db->PHY_reg4 |= 0x0040; break;
1918                        case 0x4: db->PHY_reg4 |= 0x0080; break;
1919                        case 0x8: db->PHY_reg4 |= 0x0100; break;
1920                        }
1921                }
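                /*
                 * The capability bits are translated into the matching PHY
                 * register 4 advertisement bits (10M/100M, half/full duplex)
                 * and written out later by dmfe_set_phyxcer().
                 */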
1922
1923                /* Media Mode Force or not check */
1924                dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1925                             le32_to_cpup((__le32 *) (srom + 36)));
1926                switch(dmfe_mode) {
1927                case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1928                case 0x2: dmfe_media_mode = DMFE_10MFD; break;  /* 10MFD */
1929                case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1930                case 0x100:
1931                case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1932                }
1933
1934                /* Special Function setting */
1935                /* VLAN function */
1936                if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1937                        db->cr15_data |= 0x40;
1938
1939                /* Flow Control */
1940                if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1941                        db->cr15_data |= 0x400;
1942
1943                /* TX pause packet */
1944                if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1945                        db->cr15_data |= 0x9800;
1946        }
1947
1948        /* Parse HPNA parameter */
1949        db->HPNA_command = 1;
1950
1951        /* Accept remote command or not */
1952        if (HPNA_rx_cmd == 0)
1953                db->HPNA_command |= 0x8000;
1954
1955        /* Issue remote command & operation mode */
1956        if (HPNA_tx_cmd == 1)
1957                switch(HPNA_mode) {     /* Issue Remote Command */
1958                case 0: db->HPNA_command |= 0x0904; break;
1959                case 1: db->HPNA_command |= 0x0a00; break;
1960                case 2: db->HPNA_command |= 0x0506; break;
1961                case 3: db->HPNA_command |= 0x0602; break;
1962                }
1963        else
1964                switch(HPNA_mode) {     /* Don't Issue */
1965                case 0: db->HPNA_command |= 0x0004; break;
1966                case 1: db->HPNA_command |= 0x0000; break;
1967                case 2: db->HPNA_command |= 0x0006; break;
1968                case 3: db->HPNA_command |= 0x0002; break;
1969                }
1970
1971        /* Check DM9801 or DM9802 present or not */
1972        db->HPNA_present = 0;
1973        update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1974        tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1975        if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1976                /* DM9801 or DM9802 present */
1977                db->HPNA_timer = 8;
1978                if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1979                        /* DM9801 HomeRun */
1980                        db->HPNA_present = 1;
1981                        dmfe_program_DM9801(db, tmp_reg);
1982                } else {
1983                        /* DM9802 LongRun */
1984                        db->HPNA_present = 2;
1985                        dmfe_program_DM9802(db);
1986                }
1987        }
1988
1989}
1990
1991
1992/*
1993 *      Init HomeRun DM9801
1994 */
1995
1996static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
1997{
1998        uint reg17, reg25;
1999
2000        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2001        switch(HPNA_rev) {
2002        case 0xb900: /* DM9801 E3 */
2003                db->HPNA_command |= 0x1000;
2004                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2005                reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2006                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2007                break;
2008        case 0xb901: /* DM9801 E4 */
2009                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2010                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2011                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2012                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2013                break;
2014        case 0xb902: /* DM9801 E5 */
2015        case 0xb903: /* DM9801 E6 */
2016        default:
2017                db->HPNA_command |= 0x1000;
2018                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2019                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2020                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2021                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2022                break;
2023        }
2024        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2025        dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2026        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2027}
2028
2029
2030/*
2031 *      Init HomeRun DM9802
2032 */
2033
2034static void dmfe_program_DM9802(struct dmfe_board_info * db)
2035{
2036        uint phy_reg;
2037
2038        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2039        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2040        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2041        phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2042        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2043}
2044
2045
2046/*
2047 *      Check remote HPNA power and speed status. If not correct,
2048 *      issue command again.
2049 */
2050
2051static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2052{
2053        uint phy_reg;
2054
2055        /* Got remote device status */
2056        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2057        switch(phy_reg) {
2058        case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2059        case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2060        case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2061        case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2062        }
2063
2064        /* Check whether the remote device status matches our setting */
2065        if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2066                dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2067                               db->chip_id);
2068                db->HPNA_timer = 8;
2069        } else
2070                db->HPNA_timer = 600;   /* Match: re-check every 10 minutes */
2071}
2072
2073
2074
2075static const struct pci_device_id dmfe_pci_tbl[] = {
2076        { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2077        { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2078        { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2079        { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2080        { 0, }
2081};
2082MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2083
2084static int __maybe_unused dmfe_suspend(struct device *dev_d)
2085{
2086        struct net_device *dev = dev_get_drvdata(dev_d);
2087        struct dmfe_board_info *db = netdev_priv(dev);
2088        void __iomem *ioaddr = db->ioaddr;
2089
2090        /* Disable upper layer interface */
2091        netif_device_detach(dev);
2092
2093        /* Disable Tx/Rx */
2094        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2095        update_cr6(db->cr6_data, ioaddr);
2096
2097        /* Disable Interrupt */
2098        dw32(DCR7, 0);
2099        dw32(DCR5, dr32(DCR5));
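        /* Writing the status register back to itself clears any pending
         * interrupt bits (write-1-to-clear, as on other tulip-class parts).
         */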
2100
2101        /* Free RX buffers */
2102        dmfe_free_rxbuffer(db);
2103
2104        /* Enable WOL */
2105        device_wakeup_enable(dev_d);
2106
2107        return 0;
2108}
2109
2110static int __maybe_unused dmfe_resume(struct device *dev_d)
2111{
2112        struct net_device *dev = dev_get_drvdata(dev_d);
2113
2114        /* Re-initialize DM910X board */
2115        dmfe_init_dm910x(dev);
2116
2117        /* Disable WOL */
2118        device_wakeup_disable(dev_d);
2119
2120        /* Restart upper layer interface */
2121        netif_device_attach(dev);
2122
2123        return 0;
2124}
2125
2126static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume);
2127
2128static struct pci_driver dmfe_driver = {
2129        .name           = "dmfe",
2130        .id_table       = dmfe_pci_tbl,
2131        .probe          = dmfe_init_one,
2132        .remove         = dmfe_remove_one,
2133        .driver.pm      = &dmfe_pm_ops,
2134};
2135
2136MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2137MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2138MODULE_LICENSE("GPL");
2139
2140module_param(debug, int, 0);
2141module_param(mode, byte, 0);
2142module_param(cr6set, int, 0);
2143module_param(chkmode, byte, 0);
2144module_param(HPNA_mode, byte, 0);
2145module_param(HPNA_rx_cmd, byte, 0);
2146module_param(HPNA_tx_cmd, byte, 0);
2147module_param(HPNA_NoiseFloor, byte, 0);
2148module_param(SF_mode, byte, 0);
2149MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2150MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2151                "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2152
2153MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2154                "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2155
2156/*      Description:
2157 *      When the user loads the module with insmod, the kernel invokes
2158 *      this init routine to initialize and register the driver.
2159 */
2160
2161static int __init dmfe_init_module(void)
2162{
2163        int rc;
2164
2165        DMFE_DBUG(0, "init_module() ", debug);
2166
2167        if (debug)
2168                dmfe_debug = debug;     /* set debug flag */
2169        if (cr6set)
2170                dmfe_cr6_user_set = cr6set;
2171
2172        switch (mode) {
2173        case DMFE_10MHF:
2174        case DMFE_100MHF:
2175        case DMFE_10MFD:
2176        case DMFE_100MFD:
2177        case DMFE_1M_HPNA:
2178                dmfe_media_mode = mode;
2179                break;
2180        default:
2181                dmfe_media_mode = DMFE_AUTO;
2182                break;
2183        }
2184
2185        if (HPNA_mode > 4)
2186                HPNA_mode = 0;          /* Default: LP/HS */
2187        if (HPNA_rx_cmd > 1)
2188                HPNA_rx_cmd = 0;        /* Default: Ignored remote cmd */
2189        if (HPNA_tx_cmd > 1)
2190                HPNA_tx_cmd = 0;        /* Default: Don't issue remote cmd */
2191        if (HPNA_NoiseFloor > 15)
2192                HPNA_NoiseFloor = 0;
2193
2194        rc = pci_register_driver(&dmfe_driver);
2195        if (rc < 0)
2196                return rc;
2197
2198        return 0;
2199}
2200
2201
2202/*
2203 *      Description:
2204 *      When the user removes the module with rmmod, the kernel invokes
2205 *      this exit routine to unregister all registered services.
2206 */
2207
2208static void __exit dmfe_cleanup_module(void)
2209{
2210        DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2211        pci_unregister_driver(&dmfe_driver);
2212}
2213
2214module_init(dmfe_init_module);
2215module_exit(dmfe_cleanup_module);
2216