linux/drivers/net/ethernet/dec/tulip/dmfe.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
   4    ethernet driver for Linux.
   5    Copyright (C) 1997  Sten Wang
   6
   7
   8    DAVICOM Web-Site: www.davicom.com.tw
   9
  10    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
  11    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
  12
  13    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
  14
  15    Marcelo Tosatti <marcelo@conectiva.com.br> :
  16    Made it compile in 2.3 (device to net_device)
  17
  18    Alan Cox <alan@lxorguk.ukuu.org.uk> :
  19    Cleaned up for kernel merge.
  20    Removed the back compatibility support
  21    Reformatted, fixing spelling etc as I went
  22    Removed IRQ 0-15 assumption
  23
  24    Jeff Garzik <jgarzik@pobox.com> :
  25    Updated to use new PCI driver API.
  26    Resource usage cleanups.
  27    Report driver version to user.
  28
  29    Tobias Ringstrom <tori@unhappy.mine.nu> :
  30    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
  31    Andrew Morton and Frank Davis for the SMP safety fixes.
  32
  33    Vojtech Pavlik <vojtech@suse.cz> :
  34    Cleaned up pointer arithmetics.
  35    Fixed a lot of 64bit issues.
  36    Cleaned up printk()s a bit.
  37    Fixed some obvious big endian problems.
  38
  39    Tobias Ringstrom <tori@unhappy.mine.nu> :
  40    Use time_after for jiffies calculation.  Added ethtool
  41    support.  Updated PCI resource allocation.  Do not
  42    forget to unmap PCI mapped skbs.
  43
  44    Alan Cox <alan@lxorguk.ukuu.org.uk>
  45    Added new PCI identifiers provided by Clear Zhang at ALi
  46    for their 1563 ethernet device.
  47
  48    TODO
  49
  50    Check on 64 bit boxes.
  51    Check and fix on big endian boxes.
  52
  53    Test and make sure PCI latency is now correct for all cases.
  54*/
  55
  56#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  57
  58#define DRV_NAME        "dmfe"
  59
  60#include <linux/module.h>
  61#include <linux/kernel.h>
  62#include <linux/string.h>
  63#include <linux/timer.h>
  64#include <linux/ptrace.h>
  65#include <linux/errno.h>
  66#include <linux/ioport.h>
  67#include <linux/interrupt.h>
  68#include <linux/pci.h>
  69#include <linux/dma-mapping.h>
  70#include <linux/init.h>
  71#include <linux/netdevice.h>
  72#include <linux/etherdevice.h>
  73#include <linux/ethtool.h>
  74#include <linux/skbuff.h>
  75#include <linux/delay.h>
  76#include <linux/spinlock.h>
  77#include <linux/crc32.h>
  78#include <linux/bitops.h>
  79
  80#include <asm/processor.h>
  81#include <asm/io.h>
  82#include <asm/dma.h>
  83#include <linux/uaccess.h>
  84#include <asm/irq.h>
  85
  86#ifdef CONFIG_TULIP_DM910X
  87#include <linux/of.h>
  88#endif
  89
  90
  91/* Board/System/Debug information/definition ---------------- */
  92#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
  93#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
  94#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
  95#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
  96
  97#define DM9102_IO_SIZE  0x80
  98#define DM9102A_IO_SIZE 0x100
   99#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packets sent at a time */
 100#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
 101#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
 102#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)      /* Max TX packet count */
 103#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)      /* TX wakeup count */
 104#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
 105#define TX_BUF_ALLOC    0x600
 106#define RX_ALLOC_SIZE   0x620
 107#define DM910X_RESET    1
 108#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
 109#define CR6_DEFAULT     0x00080000      /* HD */
 110#define CR7_DEFAULT     0x180c1
 111#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
 112#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
 113#define MAX_PACKET_SIZE 1514
 114#define DMFE_MAX_MULTICAST 14
 115#define RX_COPY_SIZE    100
 116#define MAX_CHECK_PACKET 0x8000
 117#define DM9801_NOISE_FLOOR 8
 118#define DM9802_NOISE_FLOOR 5
 119
 120#define DMFE_WOL_LINKCHANGE     0x20000000
 121#define DMFE_WOL_SAMPLEPACKET   0x10000000
 122#define DMFE_WOL_MAGICPACKET    0x08000000
 123
 124
 125#define DMFE_10MHF      0
 126#define DMFE_100MHF     1
 127#define DMFE_10MFD      4
 128#define DMFE_100MFD     5
 129#define DMFE_AUTO       8
 130#define DMFE_1M_HPNA    0x10
 131
 132#define DMFE_TXTH_72    0x400000        /* TX TH 72 byte */
 133#define DMFE_TXTH_96    0x404000        /* TX TH 96 byte */
 134#define DMFE_TXTH_128   0x0000          /* TX TH 128 byte */
 135#define DMFE_TXTH_256   0x4000          /* TX TH 256 byte */
 136#define DMFE_TXTH_512   0x8000          /* TX TH 512 byte */
 137#define DMFE_TXTH_1K    0xC000          /* TX TH 1K  byte */
 138
 139#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
  140#define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time 1.5 s */
  141#define DMFE_TX_KICK    (HZ/2)  /* tx packet kick-out time 0.5 s */
 142
 143#define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
 144#define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
 145#define dr32(reg)       ioread32(ioaddr + (reg))
 146#define dr16(reg)       ioread16(ioaddr + (reg))
 147#define dr8(reg)        ioread8(ioaddr + (reg))
 148
 149#define DMFE_DBUG(dbug_now, msg, value)                 \
 150        do {                                            \
 151                if (dmfe_debug || (dbug_now))           \
 152                        pr_err("%s %lx\n",              \
 153                               (msg), (long) (value));  \
 154        } while (0)
 155
 156#define SHOW_MEDIA_TYPE(mode)                           \
  157        pr_info("Change Speed to %sMbps %s duplex\n",   \
 158                (mode & 1) ? "100":"10",                \
 159                (mode & 4) ? "full":"half");
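
/*
 * Illustration: the mode argument is one of the DMFE_* media codes above,
 * encoded so that bit 0 means 100M and bit 2 means full duplex.  So
 * SHOW_MEDIA_TYPE(DMFE_100MFD) (value 5) selects "100"/"full", while
 * SHOW_MEDIA_TYPE(DMFE_10MHF) (value 0) selects "10"/"half".
 */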
 160
 161
 162/* CR9 definition: SROM/MII */
 163#define CR9_SROM_READ   0x4800
 164#define CR9_SRCS        0x1
 165#define CR9_SRCLK       0x2
 166#define CR9_CRDOUT      0x8
 167#define SROM_DATA_0     0x0
 168#define SROM_DATA_1     0x4
 169#define PHY_DATA_1      0x20000
 170#define PHY_DATA_0      0x00000
 171#define MDCLKH          0x10000
 172
 173#define PHY_POWER_DOWN  0x800
 174
 175#define SROM_V41_CODE   0x14
 176
 177#define __CHK_IO_SIZE(pci_id, dev_rev) \
 178 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
 179        DM9102A_IO_SIZE: DM9102_IO_SIZE)
 180
 181#define CHK_IO_SIZE(pci_dev) \
 182        (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
 183        (pci_dev)->revision))
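
/*
 * Illustration: CHK_IO_SIZE() rebuilds the 32-bit chip id as
 * (device << 16) | vendor, so a DM9102 (vendor 0x1282, device 0x9102)
 * gives 0x91021282.  A DM9132, or any chip with revision >= 0x30
 * (DM9102A and later), needs the larger 0x100 I/O window; an original
 * DM9102 below revision 0x30 only needs 0x80 bytes.
 */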
 184
 185/* Structure/enum declaration ------------------------------- */
 186struct tx_desc {
 187        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
 188        char *tx_buf_ptr;               /* Data for us */
 189        struct tx_desc *next_tx_desc;
 190} __attribute__(( aligned(32) ));
 191
 192struct rx_desc {
 193        __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
 194        struct sk_buff *rx_skb_ptr;     /* Data for us */
 195        struct rx_desc *next_rx_desc;
 196} __attribute__(( aligned(32) ));
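
/*
 * Layout note (see dmfe_descriptor_init() below): only the four __le32
 * words at the start of each descriptor are read by the chip; the
 * driver-only pointers pad the structure out to the aligned(32) size, so
 * the init code can compute each descriptor's chained-address field
 * (tdes3/rdes3) simply by stepping the DMA address by the structure size.
 */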
 197
 198struct dmfe_board_info {
 199        u32 chip_id;                    /* Chip vendor/Device ID */
 200        u8 chip_revision;               /* Chip revision */
 201        struct net_device *next_dev;    /* next device */
 202        struct pci_dev *pdev;           /* PCI device */
 203        spinlock_t lock;
 204
 205        void __iomem *ioaddr;           /* I/O base address */
 206        u32 cr0_data;
 207        u32 cr5_data;
 208        u32 cr6_data;
 209        u32 cr7_data;
 210        u32 cr15_data;
 211
 212        /* pointer for memory physical address */
 213        dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
 214        dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
 215        dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
 216        dma_addr_t first_tx_desc_dma;
 217        dma_addr_t first_rx_desc_dma;
 218
 219        /* descriptor pointer */
 220        unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
 221        unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
 222        unsigned char *desc_pool_ptr;   /* descriptor pool memory */
 223        struct tx_desc *first_tx_desc;
 224        struct tx_desc *tx_insert_ptr;
 225        struct tx_desc *tx_remove_ptr;
 226        struct rx_desc *first_rx_desc;
 227        struct rx_desc *rx_insert_ptr;
 228        struct rx_desc *rx_ready_ptr;   /* incoming packet pointer */
 229        unsigned long tx_packet_cnt;    /* transmitted packet count */
 230        unsigned long tx_queue_cnt;     /* wait to send packet count */
 231        unsigned long rx_avail_cnt;     /* available rx descriptor count */
 232        unsigned long interval_rx_cnt;  /* rx packet count per callback interval */
 233
 234        u16 HPNA_command;               /* For HPNA register 16 */
 235        u16 HPNA_timer;                 /* For HPNA remote device check */
 236        u16 dbug_cnt;
 237        u16 NIC_capability;             /* NIC media capability */
 238        u16 PHY_reg4;                   /* Saved Phyxcer register 4 value */
 239
 240        u8 HPNA_present;                /* 0:none, 1:DM9801, 2:DM9802 */
 241        u8 chip_type;                   /* Keep DM9102A chip type */
 242        u8 media_mode;                  /* user specify media mode */
 243        u8 op_mode;                     /* real work media mode */
 244        u8 phy_addr;
 245        u8 wait_reset;                  /* Hardware failed, need to reset */
 246        u8 dm910x_chk_mode;             /* Operating mode check */
 247        u8 first_in_callback;           /* Flag to record state */
 248        u8 wol_mode;                    /* user WOL settings */
 249        struct timer_list timer;
 250
 251        /* Driver defined statistic counter */
 252        unsigned long tx_fifo_underrun;
 253        unsigned long tx_loss_carrier;
 254        unsigned long tx_no_carrier;
 255        unsigned long tx_late_collision;
 256        unsigned long tx_excessive_collision;
 257        unsigned long tx_jabber_timeout;
 258        unsigned long reset_count;
 259        unsigned long reset_cr8;
 260        unsigned long reset_fatal;
 261        unsigned long reset_TXtimeout;
 262
 263        /* NIC SROM data */
 264        unsigned char srom[128];
 265};
 266
 267enum dmfe_offsets {
 268        DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
 269        DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
 270        DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
 271        DCR15 = 0x78
 272};
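
/*
 * The DCRn offsets are simply n * 8: the DM910x spaces its CSRs on 8-byte
 * boundaries in I/O space (e.g. DCR7 = 7 * 8 = 0x38), which is the offset
 * the dr32()/dw32() helpers above add to ioaddr.
 */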
 273
 274enum dmfe_CR6_bits {
 275        CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
 276        CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
 277        CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
 278};
 279
 280/* Global variable declaration ----------------------------- */
 281static int dmfe_debug;
 282static unsigned char dmfe_media_mode = DMFE_AUTO;
 283static u32 dmfe_cr6_user_set;
 284
 285/* For module input parameter */
 286static int debug;
 287static u32 cr6set;
 288static unsigned char mode = 8;
 289static u8 chkmode = 1;
 290static u8 HPNA_mode;            /* Default: Low Power/High Speed */
 291static u8 HPNA_rx_cmd;          /* Default: Disable Rx remote command */
 292static u8 HPNA_tx_cmd;          /* Default: Don't issue remote command */
 293static u8 HPNA_NoiseFloor;      /* Default: HPNA NoiseFloor */
 294static u8 SF_mode;              /* Special Function: 1:VLAN, 2:RX Flow Control
 295                                   4: TX pause packet */
 296
 297
 298/* function declaration ------------------------------------- */
 299static int dmfe_open(struct net_device *);
 300static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
 301static int dmfe_stop(struct net_device *);
 302static void dmfe_set_filter_mode(struct net_device *);
 303static const struct ethtool_ops netdev_ethtool_ops;
 304static u16 read_srom_word(void __iomem *, int);
 305static irqreturn_t dmfe_interrupt(int , void *);
 306#ifdef CONFIG_NET_POLL_CONTROLLER
 307static void poll_dmfe (struct net_device *dev);
 308#endif
 309static void dmfe_descriptor_init(struct net_device *);
 310static void allocate_rx_buffer(struct net_device *);
 311static void update_cr6(u32, void __iomem *);
 312static void send_filter_frame(struct net_device *);
 313static void dm9132_id_table(struct net_device *);
 314static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
 315static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
 316static void dmfe_phy_write_1bit(void __iomem *, u32);
 317static u16 dmfe_phy_read_1bit(void __iomem *);
 318static u8 dmfe_sense_speed(struct dmfe_board_info *);
 319static void dmfe_process_mode(struct dmfe_board_info *);
 320static void dmfe_timer(struct timer_list *);
 321static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
 322static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
 323static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
 324static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
 325static void dmfe_dynamic_reset(struct net_device *);
 326static void dmfe_free_rxbuffer(struct dmfe_board_info *);
 327static void dmfe_init_dm910x(struct net_device *);
 328static void dmfe_parse_srom(struct dmfe_board_info *);
 329static void dmfe_program_DM9801(struct dmfe_board_info *, int);
 330static void dmfe_program_DM9802(struct dmfe_board_info *);
 331static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
 332static void dmfe_set_phyxcer(struct dmfe_board_info *);
 333
 334/* DM910X network board routine ---------------------------- */
 335
 336static const struct net_device_ops netdev_ops = {
 337        .ndo_open               = dmfe_open,
 338        .ndo_stop               = dmfe_stop,
 339        .ndo_start_xmit         = dmfe_start_xmit,
 340        .ndo_set_rx_mode        = dmfe_set_filter_mode,
 341        .ndo_set_mac_address    = eth_mac_addr,
 342        .ndo_validate_addr      = eth_validate_addr,
 343#ifdef CONFIG_NET_POLL_CONTROLLER
 344        .ndo_poll_controller    = poll_dmfe,
 345#endif
 346};
 347
 348/*
 349 *      Search for a DM910X board, allocate space and register it
 350 */
 351
 352static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 353{
 354        struct dmfe_board_info *db;     /* board information structure */
 355        struct net_device *dev;
 356        u32 pci_pmr;
 357        int i, err;
 358
 359        DMFE_DBUG(0, "dmfe_init_one()", 0);
 360
 361        /*
 362         *      SPARC on-board DM910x chips should be handled by the main
 363         *      tulip driver, except for early DM9100s.
 364         */
 365#ifdef CONFIG_TULIP_DM910X
 366        if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
 367            ent->driver_data == PCI_DM9102_ID) {
 368                struct device_node *dp = pci_device_to_OF_node(pdev);
 369
 370                if (dp && of_get_property(dp, "local-mac-address", NULL)) {
 371                        pr_info("skipping on-board DM910x (use tulip)\n");
 372                        return -ENODEV;
 373                }
 374        }
 375#endif
 376
 377        /* Init network device */
 378        dev = alloc_etherdev(sizeof(*db));
 379        if (dev == NULL)
 380                return -ENOMEM;
 381        SET_NETDEV_DEV(dev, &pdev->dev);
 382
 383        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 384                pr_warn("32-bit PCI DMA not available\n");
 385                err = -ENODEV;
 386                goto err_out_free;
 387        }
 388
 389        /* Enable Master/IO access, Disable memory access */
 390        err = pci_enable_device(pdev);
 391        if (err)
 392                goto err_out_free;
 393
 394        if (!pci_resource_start(pdev, 0)) {
 395                pr_err("I/O base is zero\n");
 396                err = -ENODEV;
 397                goto err_out_disable;
 398        }
 399
 400        if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
 401                pr_err("Allocated I/O size too small\n");
 402                err = -ENODEV;
 403                goto err_out_disable;
 404        }
 405
 406#if 0   /* pci_{enable_device,set_master} sets minimum latency for us now */
 407
 408        /* Set Latency Timer 80h */
 409        /* FIXME: setting values > 32 breaks some SiS 559x stuff.
 410           Need a PCI quirk.. */
 411
 412        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
 413#endif
 414
 415        if (pci_request_regions(pdev, DRV_NAME)) {
 416                pr_err("Failed to request PCI regions\n");
 417                err = -ENODEV;
 418                goto err_out_disable;
 419        }
 420
 421        /* Init system & device */
 422        db = netdev_priv(dev);
 423
 424        /* Allocate Tx/Rx descriptor memory */
 425        db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
 426                                               sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
 427                                               &db->desc_pool_dma_ptr, GFP_KERNEL);
 428        if (!db->desc_pool_ptr) {
 429                err = -ENOMEM;
 430                goto err_out_res;
 431        }
 432
 433        db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
 434                                              TX_BUF_ALLOC * TX_DESC_CNT + 4,
 435                                              &db->buf_pool_dma_ptr, GFP_KERNEL);
 436        if (!db->buf_pool_ptr) {
 437                err = -ENOMEM;
 438                goto err_out_free_desc;
 439        }
 440
 441        db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 442        db->first_tx_desc_dma = db->desc_pool_dma_ptr;
 443        db->buf_pool_start = db->buf_pool_ptr;
 444        db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 445
 446        db->chip_id = ent->driver_data;
 447        /* IO type range. */
 448        db->ioaddr = pci_iomap(pdev, 0, 0);
 449        if (!db->ioaddr) {
 450                err = -ENOMEM;
 451                goto err_out_free_buf;
 452        }
 453
 454        db->chip_revision = pdev->revision;
 455        db->wol_mode = 0;
 456
 457        db->pdev = pdev;
 458
 459        pci_set_drvdata(pdev, dev);
 460        dev->netdev_ops = &netdev_ops;
 461        dev->ethtool_ops = &netdev_ethtool_ops;
 462        netif_carrier_off(dev);
 463        spin_lock_init(&db->lock);
 464
 465        pci_read_config_dword(pdev, 0x50, &pci_pmr);
 466        pci_pmr &= 0x70000;
 467        if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
 468                db->chip_type = 1;      /* DM9102A E3 */
 469        else
 470                db->chip_type = 0;
 471
 472        /* read 64 words of srom data */
 473        for (i = 0; i < 64; i++) {
 474                ((__le16 *) db->srom)[i] =
 475                        cpu_to_le16(read_srom_word(db->ioaddr, i));
 476        }
 477
 478        /* Set Node address */
 479        eth_hw_addr_set(dev, &db->srom[20]);
 480
 481        err = register_netdev (dev);
 482        if (err)
 483                goto err_out_unmap;
 484
 485        dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
 486                 ent->driver_data >> 16,
 487                 pci_name(pdev), dev->dev_addr, pdev->irq);
 488
 489        pci_set_master(pdev);
 490
 491        return 0;
 492
 493err_out_unmap:
 494        pci_iounmap(pdev, db->ioaddr);
 495err_out_free_buf:
 496        dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 497                          db->buf_pool_ptr, db->buf_pool_dma_ptr);
 498err_out_free_desc:
 499        dma_free_coherent(&pdev->dev,
 500                          sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
 501                          db->desc_pool_ptr, db->desc_pool_dma_ptr);
 502err_out_res:
 503        pci_release_regions(pdev);
 504err_out_disable:
 505        pci_disable_device(pdev);
 506err_out_free:
 507        free_netdev(dev);
 508
 509        return err;
 510}
 511
 512
 513static void dmfe_remove_one(struct pci_dev *pdev)
 514{
 515        struct net_device *dev = pci_get_drvdata(pdev);
 516        struct dmfe_board_info *db = netdev_priv(dev);
 517
 518        DMFE_DBUG(0, "dmfe_remove_one()", 0);
 519
 520        if (dev) {
 521
 522                unregister_netdev(dev);
 523                pci_iounmap(db->pdev, db->ioaddr);
 524                dma_free_coherent(&db->pdev->dev,
 525                                  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
 526                                  db->desc_pool_ptr, db->desc_pool_dma_ptr);
 527                dma_free_coherent(&db->pdev->dev,
 528                                  TX_BUF_ALLOC * TX_DESC_CNT + 4,
 529                                  db->buf_pool_ptr, db->buf_pool_dma_ptr);
 530                pci_release_regions(pdev);
 531                free_netdev(dev);       /* free board information */
 532        }
 533
 534        DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
 535}
 536
 537
 538/*
 539 *      Open the interface.
 540 *      The interface is opened whenever "ifconfig" activates it.
 541 */
 542
 543static int dmfe_open(struct net_device *dev)
 544{
 545        struct dmfe_board_info *db = netdev_priv(dev);
 546        const int irq = db->pdev->irq;
 547        int ret;
 548
 549        DMFE_DBUG(0, "dmfe_open", 0);
 550
 551        ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
 552        if (ret)
 553                return ret;
 554
 555        /* system variable init */
 556        db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
 557        db->tx_packet_cnt = 0;
 558        db->tx_queue_cnt = 0;
 559        db->rx_avail_cnt = 0;
 560        db->wait_reset = 0;
 561
 562        db->first_in_callback = 0;
 563        db->NIC_capability = 0xf;       /* All capability*/
 564        db->PHY_reg4 = 0x1e0;
 565
 566        /* CR6 operation mode decision */
 567        if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
 568                (db->chip_revision >= 0x30) ) {
 569                db->cr6_data |= DMFE_TXTH_256;
 570                db->cr0_data = CR0_DEFAULT;
 571                db->dm910x_chk_mode=4;          /* Enter the normal mode */
 572        } else {
 573                db->cr6_data |= CR6_SFT;        /* Store & Forward mode */
 574                db->cr0_data = 0;
 575                db->dm910x_chk_mode = 1;        /* Enter the check mode */
 576        }
 577
 578        /* Initialize DM910X board */
 579        dmfe_init_dm910x(dev);
 580
 581        /* Activate system interface */
 582        netif_wake_queue(dev);
 583
 584        /* Set and activate a timer process */
 585        timer_setup(&db->timer, dmfe_timer, 0);
 586        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
 587        add_timer(&db->timer);
 588
 589        return 0;
 590}
 591
 592
 593/*      Initialize DM910X board
 594 *      Reset DM910X board
 595 *      Initialize TX/Rx descriptor chain structure
 596 *      Send the set-up frame
 597 *      Enable Tx/Rx machine
 598 */
 599
 600static void dmfe_init_dm910x(struct net_device *dev)
 601{
 602        struct dmfe_board_info *db = netdev_priv(dev);
 603        void __iomem *ioaddr = db->ioaddr;
 604
 605        DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
 606
 607        /* Reset DM910x MAC controller */
 608        dw32(DCR0, DM910X_RESET);       /* RESET MAC */
 609        udelay(100);
 610        dw32(DCR0, db->cr0_data);
 611        udelay(5);
 612
 613        /* Phy addr : DM9102(A)/DM9132/9801, phy address = 1 */
 614        db->phy_addr = 1;
 615
 616        /* Parse SROM and media mode */
 617        dmfe_parse_srom(db);
 618        db->media_mode = dmfe_media_mode;
 619
 620        /* RESET Phyxcer Chip by GPR port bit 7 */
 621        dw32(DCR12, 0x180);             /* Let bit 7 output port */
 622        if (db->chip_id == PCI_DM9009_ID) {
 623                dw32(DCR12, 0x80);      /* Issue RESET signal */
 624                mdelay(300);                    /* Delay 300 ms */
 625        }
 626        dw32(DCR12, 0x0);       /* Clear RESET signal */
 627
 628        /* Process Phyxcer Media Mode */
 629        if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
 630                dmfe_set_phyxcer(db);
 631
 632        /* Media Mode Process */
 633        if ( !(db->media_mode & DMFE_AUTO) )
 634                db->op_mode = db->media_mode;   /* Force Mode */
 635
 636        /* Initialize Transmit/Receive descriptor and CR3/4 */
 637        dmfe_descriptor_init(dev);
 638
 639        /* Init CR6 to program DM910x operation */
 640        update_cr6(db->cr6_data, ioaddr);
 641
 642        /* Send setup frame */
 643        if (db->chip_id == PCI_DM9132_ID)
 644                dm9132_id_table(dev);   /* DM9132 */
 645        else
 646                send_filter_frame(dev); /* DM9102/DM9102A */
 647
 648        /* Init CR7, interrupt active bit */
 649        db->cr7_data = CR7_DEFAULT;
 650        dw32(DCR7, db->cr7_data);
 651
 652        /* Init CR15, Tx jabber and Rx watchdog timer */
 653        dw32(DCR15, db->cr15_data);
 654
 655        /* Enable DM910X Tx/Rx function */
 656        db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
 657        update_cr6(db->cr6_data, ioaddr);
 658}
 659
 660
 661/*
 662 *      Hardware start transmission.
 663 *      Send a packet to media from the upper layer.
 664 */
 665
 666static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 667                                         struct net_device *dev)
 668{
 669        struct dmfe_board_info *db = netdev_priv(dev);
 670        void __iomem *ioaddr = db->ioaddr;
 671        struct tx_desc *txptr;
 672        unsigned long flags;
 673
 674        DMFE_DBUG(0, "dmfe_start_xmit", 0);
 675
 676        /* Too large packet check */
 677        if (skb->len > MAX_PACKET_SIZE) {
 678                pr_err("big packet = %d\n", (u16)skb->len);
 679                dev_kfree_skb_any(skb);
 680                return NETDEV_TX_OK;
 681        }
 682
 683        /* Resource flag check */
 684        netif_stop_queue(dev);
 685
 686        spin_lock_irqsave(&db->lock, flags);
 687
 688        /* No Tx resource check, it never happens normally */
 689        if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
 690                spin_unlock_irqrestore(&db->lock, flags);
 691                pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
 692                return NETDEV_TX_BUSY;
 693        }
 694
 695        /* Disable NIC interrupt */
 696        dw32(DCR7, 0);
 697
 698        /* transmit this packet */
 699        txptr = db->tx_insert_ptr;
 700        skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
 701        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 702
 703        /* Point to next transmit free descriptor */
 704        db->tx_insert_ptr = txptr->next_tx_desc;
 705
 706        /* Transmit Packet Process */
 707        if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
 708                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
 709                db->tx_packet_cnt++;                    /* Ready to send */
 710                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 711                netif_trans_update(dev);                /* saved time stamp */
 712        } else {
 713                db->tx_queue_cnt++;                     /* queue TX packet */
 714                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 715        }
 716
 717        /* Tx resource check */
 718        if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
 719                netif_wake_queue(dev);
 720
 721        /* Restore CR7 to enable interrupt */
 722        spin_unlock_irqrestore(&db->lock, flags);
 723        dw32(DCR7, db->cr7_data);
 724
 725        /* free this SKB */
 726        dev_consume_skb_any(skb);
 727
 728        return NETDEV_TX_OK;
 729}
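
/*
 * Note on the transmit path: the packet is copied into the descriptor's
 * pre-allocated TX_BUF_ALLOC slot inside the coherent buffer pool, which
 * is why the skb can be consumed right away and why dmfe_free_tx_pkt()
 * has no per-packet DMA mapping to undo.
 */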
 730
 731
 732/*
 733 *      Stop the interface.
 734 *      The interface is stopped when it is brought down.
 735 */
 736
 737static int dmfe_stop(struct net_device *dev)
 738{
 739        struct dmfe_board_info *db = netdev_priv(dev);
 740        void __iomem *ioaddr = db->ioaddr;
 741
 742        DMFE_DBUG(0, "dmfe_stop", 0);
 743
 744        /* disable system */
 745        netif_stop_queue(dev);
 746
 747        /* delete timer */
 748        del_timer_sync(&db->timer);
 749
 750        /* Reset & stop DM910X board */
 751        dw32(DCR0, DM910X_RESET);
 752        udelay(100);
 753        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
 754
 755        /* free interrupt */
 756        free_irq(db->pdev->irq, dev);
 757
 758        /* free allocated rx buffer */
 759        dmfe_free_rxbuffer(db);
 760
 761#if 0
 762        /* show statistic counter */
 763        printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
 764               db->tx_fifo_underrun, db->tx_excessive_collision,
 765               db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
 766               db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
 767               db->reset_fatal, db->reset_TXtimeout);
 768#endif
 769
 770        return 0;
 771}
 772
 773
 774/*
 775 *      DM9102 interrupt handler
 776 *      receive the packet to upper layer, free the transmitted packet
 777 */
 778
 779static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 780{
 781        struct net_device *dev = dev_id;
 782        struct dmfe_board_info *db = netdev_priv(dev);
 783        void __iomem *ioaddr = db->ioaddr;
 784        unsigned long flags;
 785
 786        DMFE_DBUG(0, "dmfe_interrupt()", 0);
 787
 788        spin_lock_irqsave(&db->lock, flags);
 789
 790        /* Got DM910X status */
 791        db->cr5_data = dr32(DCR5);
 792        dw32(DCR5, db->cr5_data);
 793        if ( !(db->cr5_data & 0xc1) ) {
 794                spin_unlock_irqrestore(&db->lock, flags);
 795                return IRQ_HANDLED;
 796        }
 797
 798        /* Disable all interrupts in CR7 to solve the interrupt edge problem */
 799        dw32(DCR7, 0);
 800
 801        /* Check system status */
 802        if (db->cr5_data & 0x2000) {
 803                /* system bus error happened */
 804                DMFE_DBUG(1, "System bus error. CR5=", db->cr5_data);
 805                db->reset_fatal++;
 806                db->wait_reset = 1;     /* Need to RESET */
 807                spin_unlock_irqrestore(&db->lock, flags);
 808                return IRQ_HANDLED;
 809        }
 810
 811        /* Receive the incoming packet */
 812        if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
 813                dmfe_rx_packet(dev, db);
 814
 815        /* reallocate rx descriptor buffer */
 816        if (db->rx_avail_cnt<RX_DESC_CNT)
 817                allocate_rx_buffer(dev);
 818
 819        /* Free the transmitted descriptor */
 820        if ( db->cr5_data & 0x01)
 821                dmfe_free_tx_pkt(dev, db);
 822
 823        /* Mode Check */
 824        if (db->dm910x_chk_mode & 0x2) {
 825                db->dm910x_chk_mode = 0x4;
 826                db->cr6_data |= 0x100;
 827                update_cr6(db->cr6_data, ioaddr);
 828        }
 829
 830        /* Restore CR7 to enable interrupt mask */
 831        dw32(DCR7, db->cr7_data);
 832
 833        spin_unlock_irqrestore(&db->lock, flags);
 834        return IRQ_HANDLED;
 835}
 836
 837
 838#ifdef CONFIG_NET_POLL_CONTROLLER
 839/*
 840 * Polling 'interrupt' - used by things like netconsole to send skbs
 841 * without having to re-enable interrupts. It's not called while
 842 * the interrupt routine is executing.
 843 */
 844
 845static void poll_dmfe (struct net_device *dev)
 846{
 847        struct dmfe_board_info *db = netdev_priv(dev);
 848        const int irq = db->pdev->irq;
 849
 850        /* disable_irq here is not very nice, but with the lockless
 851           interrupt handler we have no other choice. */
 852        disable_irq(irq);
 853        dmfe_interrupt (irq, dev);
 854        enable_irq(irq);
 855}
 856#endif
 857
 858/*
 859 *      Free TX resources after TX completion
 860 */
 861
 862static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
 863{
 864        struct tx_desc *txptr;
 865        void __iomem *ioaddr = db->ioaddr;
 866        u32 tdes0;
 867
 868        txptr = db->tx_remove_ptr;
 869        while(db->tx_packet_cnt) {
 870                tdes0 = le32_to_cpu(txptr->tdes0);
 871                if (tdes0 & 0x80000000)
 872                        break;
 873
 874                /* A packet sent completed */
 875                db->tx_packet_cnt--;
 876                dev->stats.tx_packets++;
 877
 878                /* Transmit statistic counter */
 879                if ( tdes0 != 0x7fffffff ) {
 880                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
 881                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
 882                        if (tdes0 & TDES0_ERR_MASK) {
 883                                dev->stats.tx_errors++;
 884
 885                                if (tdes0 & 0x0002) {   /* UnderRun */
 886                                        db->tx_fifo_underrun++;
 887                                        if ( !(db->cr6_data & CR6_SFT) ) {
 888                                                db->cr6_data = db->cr6_data | CR6_SFT;
 889                                                update_cr6(db->cr6_data, ioaddr);
 890                                        }
 891                                }
 892                                if (tdes0 & 0x0100)
 893                                        db->tx_excessive_collision++;
 894                                if (tdes0 & 0x0200)
 895                                        db->tx_late_collision++;
 896                                if (tdes0 & 0x0400)
 897                                        db->tx_no_carrier++;
 898                                if (tdes0 & 0x0800)
 899                                        db->tx_loss_carrier++;
 900                                if (tdes0 & 0x4000)
 901                                        db->tx_jabber_timeout++;
 902                        }
 903                }
 904
 905                txptr = txptr->next_tx_desc;
 906        }/* End of while */
 907
 908        /* Update TX remove pointer to next */
 909        db->tx_remove_ptr = txptr;
 910
 911        /* Send the next Tx packet in the queue */
 912        if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
 913                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
 914                db->tx_packet_cnt++;                    /* Ready to send */
 915                db->tx_queue_cnt--;
 916                dw32(DCR1, 0x1);                        /* Issue Tx polling */
 917                netif_trans_update(dev);                /* saved time stamp */
 918        }
 919
 920        /* Resource available check */
 921        if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
 922                netif_wake_queue(dev);  /* Active upper layer, send again */
 923}
 924
 925
 926/*
 927 *      Calculate the CRC value of the Rx packet
 928 *      flag =  1 : return the reverse CRC (for the received packet CRC)
 929 *              0 : return the normal CRC (for Hash Table index)
 930 */
 931
 932static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
 933{
 934        u32 crc = crc32(~0, Data, Len);
 935        if (flag) crc = ~crc;
 936        return crc;
 937}
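
/*
 * Usage sketch (illustrative only, not built): dm9132_id_table() below
 * keeps the low 6 bits of the normal CRC as an index into a 64-bit hash
 * filter held in four 16-bit words, while the Rx check mode compares the
 * inverted CRC against the 4 bytes that follow the received payload.
 */
#if 0
static u16 dmfe_example_hash_index(u8 *mac)
{
	/* e.g. index 0x25 -> hash_table[2], bit 5 */
	return cal_CRC(mac, ETH_ALEN, 0) & 0x3f;
}
#endif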
 938
 939
 940/*
 941 *      Receive the incoming packet and pass it to the upper layer
 942 */
 943
 944static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
 945{
 946        struct rx_desc *rxptr;
 947        struct sk_buff *skb, *newskb;
 948        int rxlen;
 949        u32 rdes0;
 950
 951        rxptr = db->rx_ready_ptr;
 952
 953        while(db->rx_avail_cnt) {
 954                rdes0 = le32_to_cpu(rxptr->rdes0);
 955                if (rdes0 & 0x80000000) /* packet owner check */
 956                        break;
 957
 958                db->rx_avail_cnt--;
 959                db->interval_rx_cnt++;
 960
 961                dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
 962                                 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
 963
 964                if ( (rdes0 & 0x300) != 0x300) {
 965                        /* A packet without First/Last flag */
 966                        /* reuse this SKB */
 967                        DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
 968                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 969                } else {
 970                        /* A packet with First/Last flag */
 971                        rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
 972
 973                        /* error summary bit check */
 974                        if (rdes0 & 0x8000) {
 975                                /* This is an error packet */
 976                                dev->stats.rx_errors++;
 977                                if (rdes0 & 1)
 978                                        dev->stats.rx_fifo_errors++;
 979                                if (rdes0 & 2)
 980                                        dev->stats.rx_crc_errors++;
 981                                if (rdes0 & 0x80)
 982                                        dev->stats.rx_length_errors++;
 983                        }
 984
 985                        if ( !(rdes0 & 0x8000) ||
 986                                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
 987                                skb = rxptr->rx_skb_ptr;
 988
 989                                /* Received packet CRC check needed or not */
 990                                if ( (db->dm910x_chk_mode & 1) &&
 991                                        (cal_CRC(skb->data, rxlen, 1) !=
 992                                        (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
 993                                        /* Found an erroneous received packet */
 994                                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 995                                        db->dm910x_chk_mode = 3;
 996                                } else {
 997                                        /* Good packet, send to upper layer */
 998                                        /* Short packet: use a new SKB */
 999                                        if ((rxlen < RX_COPY_SIZE) &&
1000                                                ((newskb = netdev_alloc_skb(dev, rxlen + 2))
1001                                                != NULL)) {
1002
1003                                                skb = newskb;
1004                                                /* size less than COPY_SIZE, allocate a rxlen SKB */
1005                                                skb_reserve(skb, 2); /* 16byte align */
1006                                                skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1007                                                          skb_put(skb, rxlen),
1008                                                                          rxlen);
1009                                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1010                                        } else
1011                                                skb_put(skb, rxlen);
1012
1013                                        skb->protocol = eth_type_trans(skb, dev);
1014                                        netif_rx(skb);
1015                                        dev->stats.rx_packets++;
1016                                        dev->stats.rx_bytes += rxlen;
1017                                }
1018                        } else {
1019                                /* Reuse the SKB when the packet has an error */
1020                                DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1021                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1022                        }
1023                }
1024
1025                rxptr = rxptr->next_rx_desc;
1026        }
1027
1028        db->rx_ready_ptr = rxptr;
1029}
1030
1031/*
1032 * Set DM910X multicast address
1033 */
1034
1035static void dmfe_set_filter_mode(struct net_device *dev)
1036{
1037        struct dmfe_board_info *db = netdev_priv(dev);
1038        unsigned long flags;
1039        int mc_count = netdev_mc_count(dev);
1040
1041        DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1042        spin_lock_irqsave(&db->lock, flags);
1043
1044        if (dev->flags & IFF_PROMISC) {
1045                DMFE_DBUG(0, "Enable PROM Mode", 0);
1046                db->cr6_data |= CR6_PM | CR6_PBF;
1047                update_cr6(db->cr6_data, db->ioaddr);
1048                spin_unlock_irqrestore(&db->lock, flags);
1049                return;
1050        }
1051
1052        if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1053                DMFE_DBUG(0, "Pass all multicast address", mc_count);
1054                db->cr6_data &= ~(CR6_PM | CR6_PBF);
1055                db->cr6_data |= CR6_PAM;
1056                spin_unlock_irqrestore(&db->lock, flags);
1057                return;
1058        }
1059
1060        DMFE_DBUG(0, "Set multicast address", mc_count);
1061        if (db->chip_id == PCI_DM9132_ID)
1062                dm9132_id_table(dev);   /* DM9132 */
1063        else
1064                send_filter_frame(dev); /* DM9102/DM9102A */
1065        spin_unlock_irqrestore(&db->lock, flags);
1066}
1067
1068/*
1069 *      Ethtool interface
1070 */
1071
1072static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1073                               struct ethtool_drvinfo *info)
1074{
1075        struct dmfe_board_info *np = netdev_priv(dev);
1076
1077        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1078        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1079}
1080
1081static int dmfe_ethtool_set_wol(struct net_device *dev,
1082                                struct ethtool_wolinfo *wolinfo)
1083{
1084        struct dmfe_board_info *db = netdev_priv(dev);
1085
1086        if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1087                                WAKE_ARP | WAKE_MAGICSECURE))
1088                   return -EOPNOTSUPP;
1089
1090        db->wol_mode = wolinfo->wolopts;
1091        return 0;
1092}
1093
1094static void dmfe_ethtool_get_wol(struct net_device *dev,
1095                                 struct ethtool_wolinfo *wolinfo)
1096{
1097        struct dmfe_board_info *db = netdev_priv(dev);
1098
1099        wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1100        wolinfo->wolopts = db->wol_mode;
1101}
1102
1103
1104static const struct ethtool_ops netdev_ethtool_ops = {
1105        .get_drvinfo            = dmfe_ethtool_get_drvinfo,
1106        .get_link               = ethtool_op_get_link,
1107        .set_wol                = dmfe_ethtool_set_wol,
1108        .get_wol                = dmfe_ethtool_get_wol,
1109};
1110
1111/*
1112 *      A periodic timer routine
1113 *      Dynamic media sense, allocate Rx buffer...
1114 */
1115
1116static void dmfe_timer(struct timer_list *t)
1117{
1118        struct dmfe_board_info *db = from_timer(db, t, timer);
1119        struct net_device *dev = pci_get_drvdata(db->pdev);
1120        void __iomem *ioaddr = db->ioaddr;
1121        u32 tmp_cr8;
1122        unsigned char tmp_cr12;
1123        unsigned long flags;
1124
1125        int link_ok, link_ok_phy;
1126
1127        DMFE_DBUG(0, "dmfe_timer()", 0);
1128        spin_lock_irqsave(&db->lock, flags);
1129
1130        /* Media mode process when link is OK, before entering this routine */
1131        if (db->first_in_callback == 0) {
1132                db->first_in_callback = 1;
1133                if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1134                        db->cr6_data &= ~0x40000;
1135                        update_cr6(db->cr6_data, ioaddr);
1136                        dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1137                        db->cr6_data |= 0x40000;
1138                        update_cr6(db->cr6_data, ioaddr);
1139                        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1140                        add_timer(&db->timer);
1141                        spin_unlock_irqrestore(&db->lock, flags);
1142                        return;
1143                }
1144        }
1145
1146
1147        /* Operating Mode Check */
1148        if ( (db->dm910x_chk_mode & 0x1) &&
1149                (dev->stats.rx_packets > MAX_CHECK_PACKET) )
1150                db->dm910x_chk_mode = 0x4;
1151
1152        /* Dynamic reset DM910X : system error or transmit time-out */
1153        tmp_cr8 = dr32(DCR8);
1154        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1155                db->reset_cr8++;
1156                db->wait_reset = 1;
1157        }
1158        db->interval_rx_cnt = 0;
1159
1160        /* TX polling kick monitor */
1161        if ( db->tx_packet_cnt &&
1162             time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1163                dw32(DCR1, 0x1);   /* Tx polling again */
1164
1165                /* TX Timeout */
1166                if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1167                        db->reset_TXtimeout++;
1168                        db->wait_reset = 1;
1169                        dev_warn(&dev->dev, "Tx timeout - resetting\n");
1170                }
1171        }
1172
1173        if (db->wait_reset) {
1174                DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1175                db->reset_count++;
1176                dmfe_dynamic_reset(dev);
1177                db->first_in_callback = 0;
1178                db->timer.expires = DMFE_TIMER_WUT;
1179                add_timer(&db->timer);
1180                spin_unlock_irqrestore(&db->lock, flags);
1181                return;
1182        }
1183
1184        /* Link status check, Dynamic media type change */
1185        if (db->chip_id == PCI_DM9132_ID)
1186                tmp_cr12 = dr8(DCR9 + 3);       /* DM9132 */
1187        else
1188                tmp_cr12 = dr8(DCR12);          /* DM9102/DM9102A */
1189
1190        if ( ((db->chip_id == PCI_DM9102_ID) &&
1191                (db->chip_revision == 0x30)) ||
1192                ((db->chip_id == PCI_DM9132_ID) &&
1193                (db->chip_revision == 0x10)) ) {
1194                /* DM9102A Chip */
1195                if (tmp_cr12 & 2)
1196                        link_ok = 0;
1197                else
1198                        link_ok = 1;
1199        }
1200        else
1201                /* 0x43 is used instead of 0x3 because bit 6 should represent
1202                   the link status of the external PHY */
1203                link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1204
1205
1206        /* If the chip reports that the link has failed, it could be because the
1207           external PHY link status pin is not connected correctly to the chip.
1208           To be sure, ask the PHY too.
1209        */
1210
1211        /* need a dummy read because of PHY's register latch */
1212        dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1213        link_ok_phy = (dmfe_phy_read (db->ioaddr,
1214                                      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1215
1216        if (link_ok_phy != link_ok) {
1217                DMFE_DBUG (0, "PHY and chip report different link status", 0);
1218                link_ok = link_ok | link_ok_phy;
1219        }
1220
1221        if ( !link_ok && netif_carrier_ok(dev)) {
1222                /* Link Failed */
1223                DMFE_DBUG(0, "Link Failed", tmp_cr12);
1224                netif_carrier_off(dev);
1225
1226                /* For forced 10/100M half/full mode: enable auto-negotiation */
1227                /* AUTO or forced 1M HomeRun/LongRun modes don't need this */
1228                if ( !(db->media_mode & 0x38) )
1229                        dmfe_phy_write(db->ioaddr, db->phy_addr,
1230                                       0, 0x1000, db->chip_id);
1231
1232                /* AUTO mode, if INT phyxcer link failed, select EXT device */
1233                if (db->media_mode & DMFE_AUTO) {
1234                        /* 10/100M link failed, use 1M Home-Net */
1235                        db->cr6_data|=0x00040000;       /* bit18=1, MII */
1236                        db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
1237                        update_cr6(db->cr6_data, ioaddr);
1238                }
1239        } else if (!netif_carrier_ok(dev)) {
1240
1241                DMFE_DBUG(0, "Link OK", tmp_cr12);
1242
1243                /* Auto Sense Speed */
1244                if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1245                        netif_carrier_on(dev);
1246                        SHOW_MEDIA_TYPE(db->op_mode);
1247                }
1248
1249                dmfe_process_mode(db);
1250        }
1251
1252        /* HPNA remote command check */
1253        if (db->HPNA_command & 0xf00) {
1254                db->HPNA_timer--;
1255                if (!db->HPNA_timer)
1256                        dmfe_HPNA_remote_cmd_chk(db);
1257        }
1258
1259        /* Timer active again */
1260        db->timer.expires = DMFE_TIMER_WUT;
1261        add_timer(&db->timer);
1262        spin_unlock_irqrestore(&db->lock, flags);
1263}
1264
1265
1266/*
1267 *      Dynamically reset the DM910X board:
1268 *      Stop DM910X board
1269 *      Free Tx/Rx allocated memory
1270 *      Reset DM910X board
1271 *      Re-initialize DM910X board
1272 */
1273
1274static void dmfe_dynamic_reset(struct net_device *dev)
1275{
1276        struct dmfe_board_info *db = netdev_priv(dev);
1277        void __iomem *ioaddr = db->ioaddr;
1278
1279        DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1280
1281        /* Stop MAC controller */
1282        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1283        update_cr6(db->cr6_data, ioaddr);
1284        dw32(DCR7, 0);                          /* Disable Interrupt */
1285        dw32(DCR5, dr32(DCR5));
1286
1287        /* Disable upper layer interface */
1288        netif_stop_queue(dev);
1289
1290        /* Free Rx Allocate buffer */
1291        dmfe_free_rxbuffer(db);
1292
1293        /* system variable init */
1294        db->tx_packet_cnt = 0;
1295        db->tx_queue_cnt = 0;
1296        db->rx_avail_cnt = 0;
1297        netif_carrier_off(dev);
1298        db->wait_reset = 0;
1299
1300        /* Re-initialize DM910X board */
1301        dmfe_init_dm910x(dev);
1302
1303        /* Restart upper layer interface */
1304        netif_wake_queue(dev);
1305}
1306
1307
1308/*
1309 *      Free all allocated rx buffers
1310 */
1311
1312static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1313{
1314        DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1315
1316        /* free allocated rx buffer */
1317        while (db->rx_avail_cnt) {
1318                dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1319                db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1320                db->rx_avail_cnt--;
1321        }
1322}
1323
1324
1325/*
1326 *      Reuse the SK buffer
1327 */
1328
1329static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1330{
1331        struct rx_desc *rxptr = db->rx_insert_ptr;
1332
1333        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1334                rxptr->rx_skb_ptr = skb;
1335                rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
1336                                                          RX_ALLOC_SIZE, DMA_FROM_DEVICE));
1337                wmb();
1338                rxptr->rdes0 = cpu_to_le32(0x80000000);
1339                db->rx_avail_cnt++;
1340                db->rx_insert_ptr = rxptr->next_rx_desc;
1341        } else
1342                DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1343}
1344
1345
1346/*
1347 *      Initialize Transmit/Receive descriptors
1348 *      Use the chain structure and allocate Tx/Rx buffers
1349 */
1350
1351static void dmfe_descriptor_init(struct net_device *dev)
1352{
1353        struct dmfe_board_info *db = netdev_priv(dev);
1354        void __iomem *ioaddr = db->ioaddr;
1355        struct tx_desc *tmp_tx;
1356        struct rx_desc *tmp_rx;
1357        unsigned char *tmp_buf;
1358        dma_addr_t tmp_tx_dma, tmp_rx_dma;
1359        dma_addr_t tmp_buf_dma;
1360        int i;
1361
1362        DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1363
1364        /* tx descriptor start pointer */
1365        db->tx_insert_ptr = db->first_tx_desc;
1366        db->tx_remove_ptr = db->first_tx_desc;
1367        dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
1368
1369        /* rx descriptor start pointer */
1370        db->first_rx_desc = (void *)db->first_tx_desc +
1371                        sizeof(struct tx_desc) * TX_DESC_CNT;
1372
1373        db->first_rx_desc_dma =  db->first_tx_desc_dma +
1374                        sizeof(struct tx_desc) * TX_DESC_CNT;
1375        db->rx_insert_ptr = db->first_rx_desc;
1376        db->rx_ready_ptr = db->first_rx_desc;
1377        dw32(DCR3, db->first_rx_desc_dma);              /* RX DESC address */
1378
1379        /* Init Transmit chain */
1380        tmp_buf = db->buf_pool_start;
1381        tmp_buf_dma = db->buf_pool_dma_start;
1382        tmp_tx_dma = db->first_tx_desc_dma;
1383        for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1384                tmp_tx->tx_buf_ptr = tmp_buf;
1385                tmp_tx->tdes0 = cpu_to_le32(0);
1386                tmp_tx->tdes1 = cpu_to_le32(0x81000000);        /* IC, chain */
1387                tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1388                tmp_tx_dma += sizeof(struct tx_desc);
1389                tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1390                tmp_tx->next_tx_desc = tmp_tx + 1;
1391                tmp_buf = tmp_buf + TX_BUF_ALLOC;
1392                tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1393        }
1394        (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1395        tmp_tx->next_tx_desc = db->first_tx_desc;
1396
1397         /* Init Receive descriptor chain */
1398        tmp_rx_dma=db->first_rx_desc_dma;
1399        for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1400                tmp_rx->rdes0 = cpu_to_le32(0);
1401                tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1402                tmp_rx_dma += sizeof(struct rx_desc);
1403                tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1404                tmp_rx->next_rx_desc = tmp_rx + 1;
1405        }
1406        (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1407        tmp_rx->next_rx_desc = db->first_rx_desc;
1408
1409        /* pre-allocate Rx buffer */
1410        allocate_rx_buffer(dev);
1411}
1412
1413
1414/*
1415 *      Update CR6 value
1416 *      First stop the DM910X, then write the new value and restart
1417 */
1418
1419static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1420{
1421        u32 cr6_tmp;
1422
1423        cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1424        dw32(DCR6, cr6_tmp);
1425        udelay(5);
1426        dw32(DCR6, cr6_data);
1427        udelay(5);
1428}
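
/*
 * Note: the ~0x2002 mask above clears exactly CR6_TXSC | CR6_RXSC, so the
 * transmit and receive state machines are stopped before the caller's full
 * CR6 value is written back.
 */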
1429
1430
1431/*
1432 *      Send a setup frame for DM9132
1433 *      This setup frame initializes the DM910X address filter mode
1434 */
1435
1436static void dm9132_id_table(struct net_device *dev)
1437{
1438        const u16 *addrptr = (const u16 *)dev->dev_addr;
1439        struct dmfe_board_info *db = netdev_priv(dev);
1440        void __iomem *ioaddr = db->ioaddr + 0xc0;
1441        struct netdev_hw_addr *ha;
1442        u16 i, hash_table[4];
1443
1444        /* Node address */
1445        for (i = 0; i < 3; i++) {
1446                dw16(0, addrptr[i]);
1447                ioaddr += 4;
1448        }
1449
1450        /* Clear Hash Table */
1451        memset(hash_table, 0, sizeof(hash_table));
1452
1453        /* broadcast address */
1454        hash_table[3] = 0x8000;
1455
1456        /* the multicast address in Hash Table : 64 bits */
1457        netdev_for_each_mc_addr(ha, dev) {
1458                u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1459
1460                hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1461        }
1462
1463        /* Write the hash table to MAC MD table */
1464        for (i = 0; i < 4; i++, ioaddr += 4)
1465                dw16(0, hash_table[i]);
1466}
1467
1468
1469/*
1470 *      Send a setup frame for DM9102/DM9102A
1471 *      This setup frame initializes the DM910X address filter mode
1472 */
1473
1474static void send_filter_frame(struct net_device *dev)
1475{
1476        struct dmfe_board_info *db = netdev_priv(dev);
1477        struct netdev_hw_addr *ha;
1478        struct tx_desc *txptr;
1479        const u16 * addrptr;
1480        u32 * suptr;
1481        int i;
1482
1483        DMFE_DBUG(0, "send_filter_frame()", 0);
1484
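            /* The setup frame holds 16 perfect-filter entries of three 16-bit
             * words each (stored in the low halves of consecutive 32-bit words):
             * the node address, the broadcast address and the multicast list,
             * padded with broadcast entries -- 16 * 3 * 4 = 192 bytes in total.
             */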
1485        txptr = db->tx_insert_ptr;
1486        suptr = (u32 *) txptr->tx_buf_ptr;
1487
1488        /* Node address */
1489        addrptr = (const u16 *) dev->dev_addr;
1490        *suptr++ = addrptr[0];
1491        *suptr++ = addrptr[1];
1492        *suptr++ = addrptr[2];
1493
1494        /* broadcast address */
1495        *suptr++ = 0xffff;
1496        *suptr++ = 0xffff;
1497        *suptr++ = 0xffff;
1498
1499        /* fit the multicast address */
1500        netdev_for_each_mc_addr(ha, dev) {
1501                addrptr = (u16 *) ha->addr;
1502                *suptr++ = addrptr[0];
1503                *suptr++ = addrptr[1];
1504                *suptr++ = addrptr[2];
1505        }
1506
1507        for (i = netdev_mc_count(dev); i < 14; i++) {
1508                *suptr++ = 0xffff;
1509                *suptr++ = 0xffff;
1510                *suptr++ = 0xffff;
1511        }
1512
1513        /* prepare the setup frame */
1514        db->tx_insert_ptr = txptr->next_tx_desc;
1515        txptr->tdes1 = cpu_to_le32(0x890000c0);        /* IC, setup packet, chained, 192 bytes */
1516
1517        /* Resource Check and Send the setup packet */
1518        if (!db->tx_packet_cnt) {
1519                void __iomem *ioaddr = db->ioaddr;
1520
1521                /* Resource Empty */
1522                db->tx_packet_cnt++;
1523                txptr->tdes0 = cpu_to_le32(0x80000000); /* OWN: hand the setup frame to the NIC */
1524                update_cr6(db->cr6_data | 0x2000, ioaddr);
1525                dw32(DCR1, 0x1);        /* Issue Tx polling */
1526                update_cr6(db->cr6_data, ioaddr);
1527                netif_trans_update(dev);
1528        } else
1529                db->tx_queue_cnt++;     /* Put in TX queue */
1530}
1531
1532
1533/*
1534 *      Allocate rx buffers,
1535 *      refilling as many Rx descriptors as possible
1536 */
1537
1538static void allocate_rx_buffer(struct net_device *dev)
1539{
1540        struct dmfe_board_info *db = netdev_priv(dev);
1541        struct rx_desc *rxptr;
1542        struct sk_buff *skb;
1543
1544        rxptr = db->rx_insert_ptr;
1545
1546        while (db->rx_avail_cnt < RX_DESC_CNT) {
1547                if ((skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE)) == NULL)
1548                        break;
1549                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1550                rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
1551                                                          RX_ALLOC_SIZE, DMA_FROM_DEVICE));
1552                wmb();          /* publish the buffer address before setting OWN */
1553                rxptr->rdes0 = cpu_to_le32(0x80000000); /* OWN: give the descriptor to the NIC */
1554                rxptr = rxptr->next_rx_desc;
1555                db->rx_avail_cnt++;
1556        }
1557
1558        db->rx_insert_ptr = rxptr;
1559}
1560
1561static void srom_clk_write(void __iomem *ioaddr, u32 data)
1562{
1563        static const u32 cmd[] = {
1564                CR9_SROM_READ | CR9_SRCS,
1565                CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1566                CR9_SROM_READ | CR9_SRCS
1567        };
1568        int i;
1569
1570        for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1571                dw32(DCR9, data | cmd[i]);
1572                udelay(5);
1573        }
1574}
1575
1576/*
1577 *      Read one word data from the serial ROM
1578 */
1579static u16 read_srom_word(void __iomem *ioaddr, int offset)
1580{
1581        u16 srom_data;
1582        int i;
1583
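            /* 93C46-style serial EEPROM access: assert chip select, shift out
             * the read opcode (110) and a 6-bit word address, then clock in
             * 16 data bits, MSB first.
             */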
1584        dw32(DCR9, CR9_SROM_READ);
1585        udelay(5);
1586        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1587        udelay(5);
1588
1589        /* Send the Read Command 110b */
1590        srom_clk_write(ioaddr, SROM_DATA_1);
1591        srom_clk_write(ioaddr, SROM_DATA_1);
1592        srom_clk_write(ioaddr, SROM_DATA_0);
1593
1594        /* Send the offset */
1595        for (i = 5; i >= 0; i--) {
1596                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1597                srom_clk_write(ioaddr, srom_data);
1598        }
1599
1600        dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1601        udelay(5);
1602
1603        for (i = 16; i > 0; i--) {
1604                dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1605                udelay(5);
1606                srom_data = (srom_data << 1) |
1607                                ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1608                dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1609                udelay(5);
1610        }
1611
1612        dw32(DCR9, CR9_SROM_READ);
1613        udelay(5);
1614        return srom_data;
1615}
1616
1617
1618/*
1619 *      Auto-sense the media mode
1620 */
1621
1622static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1623{
1624        void __iomem *ioaddr = db->ioaddr;
1625        u8 ErrFlag = 0;
1626        u16 phy_mode;
1627
1628        /* CR6 bit18=0, select 10/100M */
1629        update_cr6(db->cr6_data & ~0x40000, ioaddr);
1630
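            /* MII reg 1 (BMSR) latches link failures, so read it twice and
             * use the second value, which reflects the current link state.
             */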
1631        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1632        phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1633
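            /* 0x24: auto-negotiation complete (bit 5) and link up (bit 2) */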
1634        if ( (phy_mode & 0x24) == 0x24 ) {
1635                if (db->chip_id == PCI_DM9132_ID)       /* DM9132 */
1636                        phy_mode = dmfe_phy_read(db->ioaddr,
1637                                                 db->phy_addr, 7, db->chip_id) & 0xf000;
1638                else                            /* DM9102/DM9102A */
1639                        phy_mode = dmfe_phy_read(db->ioaddr,
1640                                                 db->phy_addr, 17, db->chip_id) & 0xf000;
1641                switch (phy_mode) {
1642                case 0x1000: db->op_mode = DMFE_10MHF; break;
1643                case 0x2000: db->op_mode = DMFE_10MFD; break;
1644                case 0x4000: db->op_mode = DMFE_100MHF; break;
1645                case 0x8000: db->op_mode = DMFE_100MFD; break;
1646                default: db->op_mode = DMFE_10MHF;
1647                        ErrFlag = 1;
1648                        break;
1649                }
1650        } else {
1651                db->op_mode = DMFE_10MHF;
1652                DMFE_DBUG(0, "Link Failed :", phy_mode);
1653                ErrFlag = 1;
1654        }
1655
1656        return ErrFlag;
1657}
1658
1659
1660/*
1661 *      Set 10/100 phyxcer capability
1662 *      AUTO mode : phyxcer register4 is NIC capability
1663 *      Force mode: phyxcer register4 is the forced media
1664 */
1665
1666static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1667{
1668        void __iomem *ioaddr = db->ioaddr;
1669        u16 phy_reg;
1670
1671        /* Select 10/100M phyxcer */
1672        db->cr6_data &= ~0x40000;
1673        update_cr6(db->cr6_data, ioaddr);
1674
1675        /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1676        if (db->chip_id == PCI_DM9009_ID) {
1677                phy_reg = dmfe_phy_read(db->ioaddr,
1678                                        db->phy_addr, 18, db->chip_id) & ~0x1000;
1679
1680                dmfe_phy_write(db->ioaddr,
1681                               db->phy_addr, 18, phy_reg, db->chip_id);
1682        }
1683
1684        /* Phyxcer capability setting */
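            /* MII reg 4 (ANAR) bits 5-8 advertise 10HD/10FD/100HD/100FD;
             * clear them before installing the new capability below.
             */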
1685        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1686
1687        if (db->media_mode & DMFE_AUTO) {
1688                /* AUTO Mode */
1689                phy_reg |= db->PHY_reg4;
1690        } else {
1691                /* Force Mode */
1692                switch(db->media_mode) {
1693                case DMFE_10MHF: phy_reg |= 0x20; break;
1694                case DMFE_10MFD: phy_reg |= 0x40; break;
1695                case DMFE_100MHF: phy_reg |= 0x80; break;
1696                case DMFE_100MFD: phy_reg |= 0x100; break;
1697                }
1698                if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1699        }
1700
1701        /* Write new capability to Phyxcer Reg4 */
1702        if ( !(phy_reg & 0x01e0)) {
1703                phy_reg|=db->PHY_reg4;
1704                db->media_mode|=DMFE_AUTO;
1705        }
1706        dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1707
1708        /* Restart Auto-Negotiation */
1709        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1710                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1711        if ( !db->chip_type )
1712                dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1713}
1714
1715
1716/*
1717 *      Process op-mode
1718 *      AUTO mode : PHY controller in auto-negotiation mode
1719 *      Force mode: PHY controller in force mode with a HUB, or
1720 *                      N-way advertising only the forced capability with a SWITCH
1721 */
1722
1723static void dmfe_process_mode(struct dmfe_board_info *db)
1724{
1725        u16 phy_reg;
1726
1727        /* Full Duplex Mode Check */
1728        if (db->op_mode & 0x4)
1729                db->cr6_data |= CR6_FDM;        /* Set Full Duplex Bit */
1730        else
1731                db->cr6_data &= ~CR6_FDM;       /* Clear Full Duplex Bit */
1732
1733        /* Transceiver Selection */
1734        if (db->op_mode & 0x10)         /* 1M HomePNA */
1735                db->cr6_data |= 0x40000;        /* External MII select */
1736        else
1737                db->cr6_data &= ~0x40000;       /* Internal 10/100 transceiver */
1738
1739        update_cr6(db->cr6_data, db->ioaddr);
1740
1741        /* 10/100M phyxcer force mode handling */
1742        if ( !(db->media_mode & 0x18)) {
1743                /* Force Mode */
1744                phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1745                if ( !(phy_reg & 0x1) ) {
1746                        /* link partner without N-Way capability */
1747                        phy_reg = 0x0;
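                            /* MII reg 0 (BMCR): bit 13 selects 100Mb/s, bit 8 full duplex */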
1748                        switch(db->op_mode) {
1749                        case DMFE_10MHF: phy_reg = 0x0; break;
1750                        case DMFE_10MFD: phy_reg = 0x100; break;
1751                        case DMFE_100MHF: phy_reg = 0x2000; break;
1752                        case DMFE_100MFD: phy_reg = 0x2100; break;
1753                        }
1754                        dmfe_phy_write(db->ioaddr,
1755                                       db->phy_addr, 0, phy_reg, db->chip_id);
1756                        if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1757                                mdelay(20);
1758                        dmfe_phy_write(db->ioaddr,
1759                                       db->phy_addr, 0, phy_reg, db->chip_id);
1760                }
1761        }
1762}
1763
1764
1765/*
1766 *      Write a word to Phy register
1767 */
1768
1769static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1770                           u16 phy_data, u32 chip_id)
1771{
1772        u16 i;
1773
1774        if (chip_id == PCI_DM9132_ID) {
1775                dw16(0x80 + offset * 4, phy_data);
1776        } else {
1777                /* DM9102/DM9102A Chip */
1778
1779                /* Send 35 synchronization clock pulses to the PHY controller */
1780                for (i = 0; i < 35; i++)
1781                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1782
1783                /* Send start command(01) to Phy */
1784                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1785                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1786
1787                /* Send write command(01) to Phy */
1788                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1789                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1790
1791                /* Send Phy address */
1792                for (i = 0x10; i > 0; i = i >> 1)
1793                        dmfe_phy_write_1bit(ioaddr,
1794                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1795
1796                /* Send register address */
1797                for (i = 0x10; i > 0; i = i >> 1)
1798                        dmfe_phy_write_1bit(ioaddr,
1799                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1800
1801                /* write turnaround (10) */
1802                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1803                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1804
1805                /* Write a word data to PHY controller */
1806                for ( i = 0x8000; i > 0; i >>= 1)
1807                        dmfe_phy_write_1bit(ioaddr,
1808                                            phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1809        }
1810}
1811
1812
1813/*
1814 *      Read a word data from phy register
1815 */
1816
1817static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1818{
1819        int i;
1820        u16 phy_data;
1821
1822        if (chip_id == PCI_DM9132_ID) {
1823                /* DM9132 Chip */
1824                phy_data = dr16(0x80 + offset * 4);
1825        } else {
1826                /* DM9102/DM9102A Chip */
1827
1828                /* Send 35 synchronization clock pulses to the PHY controller */
1829                for (i = 0; i < 35; i++)
1830                        dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1831
1832                /* Send start command(01) to Phy */
1833                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1834                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1835
1836                /* Send read command(10) to Phy */
1837                dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1838                dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1839
1840                /* Send Phy address */
1841                for (i = 0x10; i > 0; i = i >> 1)
1842                        dmfe_phy_write_1bit(ioaddr,
1843                                            phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1844
1845                /* Send register address */
1846                for (i = 0x10; i > 0; i = i >> 1)
1847                        dmfe_phy_write_1bit(ioaddr,
1848                                            offset & i ? PHY_DATA_1 : PHY_DATA_0);
1849
1850                /* Skip transition state */
1851                dmfe_phy_read_1bit(ioaddr);
1852
1853                /* read 16bit data */
1854                for (phy_data = 0, i = 0; i < 16; i++) {
1855                        phy_data <<= 1;
1856                        phy_data |= dmfe_phy_read_1bit(ioaddr);
1857                }
1858        }
1859
1860        return phy_data;
1861}
1862
1863
1864/*
1865 *      Write one bit data to Phy Controller
1866 */
1867
1868static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1869{
1870        dw32(DCR9, phy_data);           /* MII Clock Low */
1871        udelay(1);
1872        dw32(DCR9, phy_data | MDCLKH);  /* MII Clock High */
1873        udelay(1);
1874        dw32(DCR9, phy_data);           /* MII Clock Low */
1875        udelay(1);
1876}
1877
1878
1879/*
1880 *      Read one bit phy data from PHY controller
1881 */
1882
1883static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1884{
1885        u16 phy_data;
1886
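            /* Bit-banged MII read via CR9 (21143-compatible layout assumed):
             * bit 16 = MDC clock, bit 18 = read-mode select, bit 19 = MDI input.
             */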
1887        dw32(DCR9, 0x50000);
1888        udelay(1);
1889        phy_data = (dr32(DCR9) >> 19) & 0x1;
1890        dw32(DCR9, 0x40000);
1891        udelay(1);
1892
1893        return phy_data;
1894}
1895
1896
1897/*
1898 *      Parse the SROM and set up the media mode
1899 */
1900
1901static void dmfe_parse_srom(struct dmfe_board_info * db)
1902{
1903        char * srom = db->srom;
1904        int dmfe_mode, tmp_reg;
1905
1906        DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1907
1908        /* Init CR15 */
1909        db->cr15_data = CR15_DEFAULT;
1910
1911        /* Check SROM Version */
1912        if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1913                /* SROM V4.01 */
1914                /* Get NIC support media mode */
1915                db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1916                db->PHY_reg4 = 0;
1917                for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1918                        switch( db->NIC_capability & tmp_reg ) {
1919                        case 0x1: db->PHY_reg4 |= 0x0020; break;
1920                        case 0x2: db->PHY_reg4 |= 0x0040; break;
1921                        case 0x4: db->PHY_reg4 |= 0x0080; break;
1922                        case 0x8: db->PHY_reg4 |= 0x0100; break;
1923                        }
1924                }
1925
1926                /* Check whether the media mode is forced */
1927                dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1928                             le32_to_cpup((__le32 *) (srom + 36)));
1929                switch(dmfe_mode) {
1930                case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1931                case 0x2: dmfe_media_mode = DMFE_10MFD; break;  /* 10MFD */
1932                case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1933                case 0x100:
1934                case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1935                }
1936
1937                /* Special Function setting */
1938                /* VLAN function */
1939                if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1940                        db->cr15_data |= 0x40;
1941
1942                /* Flow Control */
1943                if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1944                        db->cr15_data |= 0x400;
1945
1946                /* TX pause packet */
1947                if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1948                        db->cr15_data |= 0x9800;
1949        }
1950
1951        /* Parse HPNA parameter */
1952        db->HPNA_command = 1;
1953
1954        /* Accept remote command or not */
1955        if (HPNA_rx_cmd == 0)
1956                db->HPNA_command |= 0x8000;
1957
1958         /* Issue remote command & operation mode */
1959        if (HPNA_tx_cmd == 1)
1960                switch(HPNA_mode) {     /* Issue Remote Command */
1961                case 0: db->HPNA_command |= 0x0904; break;
1962                case 1: db->HPNA_command |= 0x0a00; break;
1963                case 2: db->HPNA_command |= 0x0506; break;
1964                case 3: db->HPNA_command |= 0x0602; break;
1965                }
1966        else
1967                switch(HPNA_mode) {     /* Don't Issue */
1968                case 0: db->HPNA_command |= 0x0004; break;
1969                case 1: db->HPNA_command |= 0x0000; break;
1970                case 2: db->HPNA_command |= 0x0006; break;
1971                case 3: db->HPNA_command |= 0x0002; break;
1972                }
1973
1974        /* Check DM9801 or DM9802 present or not */
1975        db->HPNA_present = 0;
1976        update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1977        tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1978        if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1979                /* DM9801 or DM9802 present */
1980                db->HPNA_timer = 8;
1981                if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1982                        /* DM9801 HomeRun */
1983                        db->HPNA_present = 1;
1984                        dmfe_program_DM9801(db, tmp_reg);
1985                } else {
1986                        /* DM9802 LongRun */
1987                        db->HPNA_present = 2;
1988                        dmfe_program_DM9802(db);
1989                }
1990        }
1991
1992}
1993
1994
1995/*
1996 *      Init HomeRun DM9801
1997 */
1998
1999static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2000{
2001        uint reg17, reg25;
2002
2003        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2004        switch(HPNA_rev) {
2005        case 0xb900: /* DM9801 E3 */
2006                db->HPNA_command |= 0x1000;
2007                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2008                reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2009                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2010                break;
2011        case 0xb901: /* DM9801 E4 */
2012                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2013                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2014                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2015                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2016                break;
2017        case 0xb902: /* DM9801 E5 */
2018        case 0xb903: /* DM9801 E6 */
2019        default:
2020                db->HPNA_command |= 0x1000;
2021                reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2022                reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2023                reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2024                reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2025                break;
2026        }
2027        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2028        dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2029        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2030}
2031
2032
2033/*
2034 *      Init HomeRun DM9802
2035 */
2036
2037static void dmfe_program_DM9802(struct dmfe_board_info * db)
2038{
2039        uint phy_reg;
2040
2041        if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2042        dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2043        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2044        phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2045        dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2046}
2047
2048
2049/*
2050 *      Check remote HPNA power and speed status. If not correct,
2051 *      issue command again.
2052 */
2053
2054static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2055{
2056        uint phy_reg;
2057
2058        /* Got remote device status */
2059        phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2060        switch(phy_reg) {
2061        case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2062        case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2063        case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2064        case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2065        }
2066
2067        /* Check whether the remote device status matches our setting */
2068        if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2069                dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2070                               db->chip_id);
2071                db->HPNA_timer = 8;
2072        } else
2073                db->HPNA_timer = 600;   /* Match: check again every 10 minutes */
2074}
2075
2076
2077
2078static const struct pci_device_id dmfe_pci_tbl[] = {
2079        { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2080        { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2081        { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2082        { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2083        { 0, }
2084};
2085MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2086
2087static int __maybe_unused dmfe_suspend(struct device *dev_d)
2088{
2089        struct net_device *dev = dev_get_drvdata(dev_d);
2090        struct dmfe_board_info *db = netdev_priv(dev);
2091        void __iomem *ioaddr = db->ioaddr;
2092
2093        /* Disable upper layer interface */
2094        netif_device_detach(dev);
2095
2096        /* Disable Tx/Rx */
2097        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2098        update_cr6(db->cr6_data, ioaddr);
2099
2100        /* Disable Interrupt */
2101        dw32(DCR7, 0);
2102        dw32(DCR5, dr32(DCR5));         /* clear any pending interrupt status */
2103
2104        /* Free RX buffers */
2105        dmfe_free_rxbuffer(db);
2106
2107        /* Enable WOL */
2108        device_wakeup_enable(dev_d);
2109
2110        return 0;
2111}
2112
2113static int __maybe_unused dmfe_resume(struct device *dev_d)
2114{
2115        struct net_device *dev = dev_get_drvdata(dev_d);
2116
2117        /* Re-initialize DM910X board */
2118        dmfe_init_dm910x(dev);
2119
2120        /* Disable WOL */
2121        device_wakeup_disable(dev_d);
2122
2123        /* Restart upper layer interface */
2124        netif_device_attach(dev);
2125
2126        return 0;
2127}
2128
2129static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume);
2130
2131static struct pci_driver dmfe_driver = {
2132        .name           = "dmfe",
2133        .id_table       = dmfe_pci_tbl,
2134        .probe          = dmfe_init_one,
2135        .remove         = dmfe_remove_one,
2136        .driver.pm      = &dmfe_pm_ops,
2137};
2138
2139MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2140MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2141MODULE_LICENSE("GPL");
2142
2143module_param(debug, int, 0);
2144module_param(mode, byte, 0);
2145module_param(cr6set, int, 0);
2146module_param(chkmode, byte, 0);
2147module_param(HPNA_mode, byte, 0);
2148module_param(HPNA_rx_cmd, byte, 0);
2149module_param(HPNA_tx_cmd, byte, 0);
2150module_param(HPNA_NoiseFloor, byte, 0);
2151module_param(SF_mode, byte, 0);
2152MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2153MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2154                "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2155
2156MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2157                "(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");
2158
2159/*      Description:
2160 *      When the user loads the module with insmod, the system invokes
2161 *      init_module() to initialize and register the driver.
2162 */
2163
2164static int __init dmfe_init_module(void)
2165{
2166        int rc;
2167
2168        DMFE_DBUG(0, "init_module() ", debug);
2169
2170        if (debug)
2171                dmfe_debug = debug;     /* set debug flag */
2172        if (cr6set)
2173                dmfe_cr6_user_set = cr6set;
2174
2175        switch (mode) {
2176        case DMFE_10MHF:
2177        case DMFE_100MHF:
2178        case DMFE_10MFD:
2179        case DMFE_100MFD:
2180        case DMFE_1M_HPNA:
2181                dmfe_media_mode = mode;
2182                break;
2183        default:
2184                dmfe_media_mode = DMFE_AUTO;
2185                break;
2186        }
2187
2188        if (HPNA_mode > 4)
2189                HPNA_mode = 0;          /* Default: LP/HS */
2190        if (HPNA_rx_cmd > 1)
2191                HPNA_rx_cmd = 0;        /* Default: Ignored remote cmd */
2192        if (HPNA_tx_cmd > 1)
2193                HPNA_tx_cmd = 0;        /* Default: Don't issue remote cmd */
2194        if (HPNA_NoiseFloor > 15)
2195                HPNA_NoiseFloor = 0;
2196
2197        rc = pci_register_driver(&dmfe_driver);
2198        if (rc < 0)
2199                return rc;
2200
2201        return 0;
2202}
2203
2204
2205/*
2206 *      Description:
2207 *      When the user removes the module with rmmod, the system invokes
2208 *      cleanup_module() to unregister all registered services.
2209 */
2210
2211static void __exit dmfe_cleanup_module(void)
2212{
2213        DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2214        pci_unregister_driver(&dmfe_driver);
2215}
2216
2217module_init(dmfe_init_module);
2218module_exit(dmfe_cleanup_module);
2219