linux/drivers/net/ethernet/dlink/dl2k.c
/*  D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng <edward_peng@dlink.com.tw>.
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME        "DL2000/TC902x-based linux driver"
#define DRV_VERSION     "v1.19"
#define DRV_RELDATE     "2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

#define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)   iowrite8(val, ioaddr + (reg))
#define dr32(reg)       ioread32(ioaddr + (reg))
#define dr16(reg)       ioread16(ioaddr + (reg))
#define dr8(reg)        ioread8(ioaddr + (reg))
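
/*
 * Register access sketch (illustrative only, not part of the driver):
 * the dw8/dw16/dw32 and dr8/dr16/dr32 helpers above expand against a
 * local 'ioaddr', so every function that touches registers first loads
 * it from netdev_private:
 *
 *      void __iomem *ioaddr = np->ioaddr;
 *      u32 mac = dr32(MACCtrl);                // 32-bit read
 *      dw32(MACCtrl, mac | AutoVLANuntagging); // read-modify-write
 */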

static char version[] =
      KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;    /* Rx frame count each interrupt */
static int rx_timeout = 200;    /* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;    /* HW xmit count each TxDMAComplete */

MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);      /* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);       /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);      /* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete | \
       UpdateStats | LinkEvent)

static void dl2k_enable_int(struct netdev_private *np)
{
        void __iomem *ioaddr = np->ioaddr;

        dw16(IntEnable, DEFAULT_INTR);
}

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
                      u16 data);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
        .ndo_open               = rio_open,
        .ndo_start_xmit         = start_xmit,
        .ndo_stop               = rio_close,
        .ndo_get_stats          = get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_set_rx_mode        = set_multicast,
        .ndo_do_ioctl           = rio_ioctl,
        .ndo_tx_timeout         = rio_tx_timeout,
        .ndo_change_mtu         = change_mtu,
};

static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int card_idx;
        int chip_idx = ent->driver_data;
        int err, irq;
        void __iomem *ioaddr;
        static int version_printed;
        void *ring_space;
        dma_addr_t ring_dma;

        if (!version_printed++)
                printk ("%s", version);

        err = pci_enable_device (pdev);
        if (err)
                return err;

        irq = pdev->irq;
        err = pci_request_regions (pdev, "dl2k");
        if (err)
                goto err_out_disable;

        pci_set_master (pdev);

        err = -ENOMEM;

        dev = alloc_etherdev (sizeof (*np));
        if (!dev)
                goto err_out_res;
        SET_NETDEV_DEV(dev, &pdev->dev);

        np = netdev_priv(dev);

        /* IO registers range. */
        ioaddr = pci_iomap(pdev, 0, 0);
        if (!ioaddr)
                goto err_out_dev;
        np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
        /* MM registers range. */
        ioaddr = pci_iomap(pdev, 1, 0);
        if (!ioaddr)
                goto err_out_iounmap;
#endif
        np->ioaddr = ioaddr;
        np->chip_id = chip_idx;
        np->pdev = pdev;
        spin_lock_init (&np->tx_lock);
        spin_lock_init (&np->rx_lock);

        /* Parse manual configuration */
        np->an_enable = 1;
        np->tx_coalesce = 1;
        if (card_idx < MAX_UNITS) {
                if (media[card_idx] != NULL) {
                        np->an_enable = 0;
                        if (strcmp (media[card_idx], "auto") == 0 ||
                            strcmp (media[card_idx], "autosense") == 0 ||
                            strcmp (media[card_idx], "0") == 0) {
                                np->an_enable = 2;
                        } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "2") == 0) {
                                np->speed = 10;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "1") == 0) {
                                np->speed = 10;
                                np->full_duplex = 0;
                        } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "6") == 0) {
                                np->speed = 1000;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "5") == 0) {
                                np->speed = 1000;
                                np->full_duplex = 0;
                        } else {
                                np->an_enable = 1;
                        }
                }
                if (jumbo[card_idx] != 0) {
                        np->jumbo = 1;
                        dev->mtu = MAX_JUMBO;
                } else {
                        np->jumbo = 0;
                        if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
                                dev->mtu = mtu[card_idx];
                }
                np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
                    vlan[card_idx] : 0;
                if (rx_coalesce > 0 && rx_timeout > 0) {
                        np->rx_coalesce = rx_coalesce;
                        np->rx_timeout = rx_timeout;
                        np->coalesce = 1;
                }
                np->tx_flow = (tx_flow == 0) ? 0 : 1;
                np->rx_flow = (rx_flow == 0) ? 0 : 1;

                if (tx_coalesce < 1)
                        tx_coalesce = 1;
                else if (tx_coalesce > TX_RING_SIZE - 1)
                        tx_coalesce = TX_RING_SIZE - 1;
        }
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
        dev->features = NETIF_F_IP_CSUM;
#endif
        pci_set_drvdata (pdev, dev);

        ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_iounmap;
        np->tx_ring = ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = ring_space;
        np->rx_ring_dma = ring_dma;

        /* Parse eeprom data */
        parse_eeprom (dev);

        /* Find PHY address */
        err = find_miiphy (dev);
        if (err)
                goto err_out_unmap_rx;

        /* Fiber device? */
        np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
        np->link_status = 0;
        /* Set media and reset PHY */
        if (np->phy_media) {
                /* default Auto-Negotiation for fiber devices */
                if (np->an_enable == 2) {
                        np->an_enable = 1;
                }
                mii_set_media_pcs (dev);
        } else {
                /* Auto-Negotiation is mandatory for 1000BASE-T,
                   IEEE 802.3ab Annex 28D page 14 */
                if (np->speed == 1000)
                        np->an_enable = 1;
                mii_set_media (dev);
        }

        err = register_netdev (dev);
        if (err)
                goto err_out_unmap_rx;

        card_idx++;

        printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
                dev->name, np->name, dev->dev_addr, irq);
        if (tx_coalesce > 1)
                printk(KERN_INFO "tx_coalesce:\t%d packets\n",
                                tx_coalesce);
        if (np->coalesce)
                printk(KERN_INFO
                       "rx_coalesce:\t%d packets\n"
                       "rx_timeout: \t%d ns\n",
                                np->rx_coalesce, np->rx_timeout * 640);
        if (np->vlan)
                printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
        return 0;

err_out_unmap_rx:
        pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
        pci_iounmap(pdev, np->ioaddr);
#endif
        pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
        free_netdev (dev);
err_out_res:
        pci_release_regions (pdev);
err_out_disable:
        pci_disable_device (pdev);
        return err;
}

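/*
 * Note on rio_probe1()'s error path: the err_out_* labels unwind in
 * exact reverse order of setup (rx ring, tx ring, iomaps, netdev, PCI
 * regions, PCI device), so a failure at any stage releases only what
 * was already acquired.
 */
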
static int
find_miiphy (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i, phy_found = 0;

        np->phy_addr = 1;

        for (i = 31; i >= 0; i--) {
                int mii_status = mii_read (dev, i, 1);
                if (mii_status != 0xffff && mii_status != 0x0000) {
                        np->phy_addr = i;
                        phy_found++;
                }
        }
        if (!phy_found) {
                printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
                return -ENODEV;
        }
        return 0;
}

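/*
 * EEPROM note (derived from the parser below): on D-Link cards the
 * Software Information Block starts at byte 0x30 of the SROM image and
 * is a chain of cells of the form { id, next-offset, payload... }.
 * Cell id 1 ends the chain, id 2 carries the duplex polarity byte,
 * id 3 the wake polarity, and id 9 an adapter description string of up
 * to 255 bytes.  A hypothetical minimal chain might look like:
 *
 *      0x30: 02 34 01  id=2, next cell at 0x34, polarity payload 0x01
 *      0x34: 01 00     id=1, end of chain
 */
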
static int
parse_eeprom (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        int i, j;
        u8 sromdata[256];
        u8 *psib;
        u32 crc;
        PSROM_t psrom = (PSROM_t) sromdata;

        int cid, next;

        for (i = 0; i < 128; i++)
                ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

        if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {  /* D-Link Only */
                /* Check CRC */
                crc = ~ether_crc_le (256 - 4, sromdata);
                if (psrom->crc != cpu_to_le32(crc)) {
                        printk (KERN_ERR "%s: EEPROM data CRC error.\n",
                                        dev->name);
                        return -1;
                }
        }

        /* Set MAC address */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = psrom->mac_addr[i];

        if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
                return 0;
        }

        /* Parse Software Information Block */
        i = 0x30;
        psib = (u8 *) sromdata;
        do {
                cid = psib[i++];
                next = psib[i++];
                if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
                        printk (KERN_ERR "Cell data error\n");
                        return -1;
                }
                switch (cid) {
                case 0: /* Format version */
                        break;
                case 1: /* End of cell */
                        return 0;
                case 2: /* Duplex Polarity */
                        np->duplex_polarity = psib[i];
                        dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
                        break;
                case 3: /* Wake Polarity */
                        np->wake_polarity = psib[i];
                        break;
                case 9: /* Adapter description */
                        j = (next - i > 255) ? 255 : next - i;
                        memcpy (np->name, &(psib[i]), j);
                        break;
                case 4:
                case 5:
                case 6:
                case 7:
                case 8: /* Reserved */
                        break;
                default:        /* Unknown cell */
                        return -1;
                }
                i = next;
        } while (1);

        return 0;
}

static int
rio_open (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        const int irq = np->pdev->irq;
        int i;
        u16 macctrl;

        i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;

        /* Reset all logic functions */
        dw16(ASICCtrl + 2,
             GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
        mdelay(10);

        /* DebugCtrl bits 4, 5, 9 must be set */
        dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

        /* Jumbo frame */
        if (np->jumbo != 0)
                dw16(MaxFrameSize, MAX_JUMBO + 14);

        alloc_list (dev);

        /* Get station address */
        for (i = 0; i < 6; i++)
                dw8(StationAddr0 + i, dev->dev_addr[i]);

        set_multicast (dev);
        if (np->coalesce) {
                dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
        }
        /* Set RIO to poll every N*320nsec. */
        dw8(RxDMAPollPeriod, 0x20);
        dw8(TxDMAPollPeriod, 0xff);
        dw8(RxDMABurstThresh, 0x30);
        dw8(RxDMAUrgentThresh, 0x30);
        dw32(RmonStatMask, 0x0007ffff);
        /* clear statistics */
        clear_stats (dev);

        /* VLAN supported */
        if (np->vlan) {
                /* priority field in RxDMAIntCtrl */
                dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
                /* VLANId */
                dw16(VLANId, np->vlan);
                /* Length/Type should be 0x8100 */
                dw32(VLANTag, 0x8100 << 16 | np->vlan);
                /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
                   VLAN information tagged by TFC's VID, CFI fields. */
                dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
        }

        init_timer (&np->timer);
        np->timer.expires = jiffies + 1*HZ;
        np->timer.data = (unsigned long) dev;
        np->timer.function = rio_timer;
        add_timer (&np->timer);

        /* Start Tx/Rx */
        dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

        macctrl = 0;
        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
        macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
        macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
        dw16(MACCtrl, macctrl);

        netif_start_queue (dev);

        dl2k_enable_int(np);
        return 0;
}

static void
rio_timer (unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        int next_tick = 1*HZ;
        unsigned long flags;

        spin_lock_irqsave(&np->rx_lock, flags);
        /* Recover rx ring exhausted error */
        if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
                printk(KERN_INFO "Try to recover rx ring exhausted...\n");
                /* Re-allocate skbuffs to fill the descriptor ring */
                for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
                        struct sk_buff *skb;
                        entry = np->old_rx % RX_RING_SIZE;
                        /* Dropped packets don't need to re-allocate */
                        if (np->rx_skbuff[entry] == NULL) {
                                skb = netdev_alloc_skb_ip_align(dev,
                                                                np->rx_buf_sz);
                                if (skb == NULL) {
                                        np->rx_ring[entry].fraginfo = 0;
                                        printk (KERN_INFO
                                                "%s: Still unable to re-allocate Rx skbuff.#%d\n",
                                                dev->name, entry);
                                        break;
                                }
                                np->rx_skbuff[entry] = skb;
                                np->rx_ring[entry].fraginfo =
                                    cpu_to_le64 (pci_map_single
                                         (np->pdev, skb->data, np->rx_buf_sz,
                                          PCI_DMA_FROMDEVICE));
                        }
                        np->rx_ring[entry].fraginfo |=
                            cpu_to_le64((u64)np->rx_buf_sz << 48);
                        np->rx_ring[entry].status = 0;
                } /* end for */
        } /* end if */
        spin_unlock_irqrestore (&np->rx_lock, flags);
        np->timer.expires = jiffies + next_tick;
        add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;

        printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
                dev->name, dr32(TxStatus));
        rio_free_tx(dev, 0);
        dev->if_port = 0;
        dev->trans_start = jiffies; /* prevent tx timeout */
}

/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->old_rx = np->old_tx = 0;
        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

        /* Initialize Tx descriptors; TFDListPtr is set in start_xmit(). */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = cpu_to_le64 (TFDDone);
                np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
                                              ((i + 1) % TX_RING_SIZE) *
                                              sizeof (struct netdev_desc));
        }

        /* Initialize Rx descriptors */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
                                                ((i + 1) % RX_RING_SIZE) *
                                                sizeof (struct netdev_desc));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].fraginfo = 0;
                np->rx_skbuff[i] = NULL;
        }

        /* Allocate the rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* Allocated fixed size of skbuff */
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;

                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
                    cpu_to_le64 ( pci_map_single (
                                  np->pdev, skb->data, np->rx_buf_sz,
                                  PCI_DMA_FROMDEVICE));
                np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
        }

        /* Set RFDListPtr */
        dw32(RFDListPtr0, np->rx_ring_dma);
        dw32(RFDListPtr1, 0);
}

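/*
 * Both rings built in alloc_list() are circular by construction: each
 * descriptor's next_desc holds the bus address of entry
 * (i + 1) % RING_SIZE, so the last descriptor points back at the first
 * and the chip can walk the chain indefinitely while the driver
 * recycles entries behind it.
 */
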
static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        struct netdev_desc *txdesc;
        unsigned entry;
        u64 tfc_vlan_tag = 0;

        if (np->link_status == 0) {     /* Link Down */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

#if 0
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdesc->status |=
                    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
                                 IPChecksumEnable);
        }
#endif
        if (np->vlan) {
                tfc_vlan_tag = VLANTagInsert |
                    ((u64)np->vlan << 32) |
                    ((u64)skb->priority << 45);
        }
        txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
                                                        skb->len,
                                                        PCI_DMA_TODEVICE));
        txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

        /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
         * Work around: Always use 1 descriptor in 10Mbps mode */
        if (entry % np->tx_coalesce == 0 || np->speed == 10)
                txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                              WordAlignDisable |
                                              TxDMAIndicate |
                                              (1 << FragCountShift));
        else
                txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                              WordAlignDisable |
                                              (1 << FragCountShift));

        /* TxDMAPollNow */
        dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
        /* Schedule ISR */
        dw32(CountDown, 10000);
        np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
        if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
                        < TX_QUEUE_LEN - 1 && np->speed != 10) {
                /* do nothing */
        } else if (!netif_queue_stopped(dev)) {
                netif_stop_queue (dev);
        }

        /* The first TFDListPtr */
        if (!dr32(TFDListPtr0)) {
                dw32(TFDListPtr0, np->tx_ring_dma +
                     entry * sizeof (struct netdev_desc));
                dw32(TFDListPtr1, 0);
        }

        return NETDEV_TX_OK;
}

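/*
 * The interrupt handler below follows an ack-then-dispatch pattern:
 * IntStatus is read and immediately written back to acknowledge it,
 * then the loop re-reads it so events arriving mid-pass are serviced
 * in the same invocation, bounded by max_intrloop (50) so a stuck
 * source cannot wedge the ISR.
 */
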
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        unsigned int_status;
        int cnt = max_intrloop;
        int handled = 0;

        while (1) {
                int_status = dr16(IntStatus);
                dw16(IntStatus, int_status);
                int_status &= DEFAULT_INTR;
                if (int_status == 0 || --cnt < 0)
                        break;
                handled = 1;
                /* Processing received packets */
                if (int_status & RxDMAComplete)
                        receive_packet (dev);
                /* TxDMAComplete interrupt */
                if ((int_status & (TxDMAComplete | IntRequested))) {
                        int tx_status;
                        tx_status = dr32(TxStatus);
                        if (tx_status & 0x01)
                                tx_error (dev, tx_status);
                        /* Free used tx skbuffs */
                        rio_free_tx (dev, 1);
                }

                /* Handle uncommon events */
                if (int_status &
                    (HostError | LinkEvent | UpdateStats))
                        rio_error (dev, int_status);
        }
        if (np->cur_tx != np->old_tx)
                dw32(CountDown, 100);
        return IRQ_RETVAL(handled);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
        return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

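/*
 * 'fraginfo' packs two fields into one little-endian 64-bit word: the
 * low 48 bits hold the DMA bus address (recovered by desc_to_dma()
 * above) and the top 16 bits hold the fragment length, which is why
 * the rx/tx paths OR in "(u64)len << 48" after mapping a buffer.
 */

/*
 * rio_free_tx() is called both from the interrupt handler (irq != 0,
 * where interrupts are already disabled so a plain spin_lock suffices)
 * and from process context (irq == 0, which must use
 * spin_lock_irqsave so the ISR cannot deadlock against us on tx_lock).
 */
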
static void
rio_free_tx (struct net_device *dev, int irq)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->old_tx % TX_RING_SIZE;
        int tx_use = 0;
        unsigned long flag = 0;

        if (irq)
                spin_lock(&np->tx_lock);
        else
                spin_lock_irqsave(&np->tx_lock, flag);

        /* Free used tx skbuffs */
        while (entry != np->cur_tx) {
                struct sk_buff *skb;

                if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
                        break;
                skb = np->tx_skbuff[entry];
                pci_unmap_single (np->pdev,
                                  desc_to_dma(&np->tx_ring[entry]),
                                  skb->len, PCI_DMA_TODEVICE);
                if (irq)
                        dev_kfree_skb_irq (skb);
                else
                        dev_kfree_skb (skb);

                np->tx_skbuff[entry] = NULL;
                entry = (entry + 1) % TX_RING_SIZE;
                tx_use++;
        }
        if (irq)
                spin_unlock(&np->tx_lock);
        else
                spin_unlock_irqrestore(&np->tx_lock, flag);
        np->old_tx = entry;

        /* If the ring is no longer full, clear tx_full and
           call netif_wake_queue() */

        if (netif_queue_stopped(dev) &&
            ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
            < TX_QUEUE_LEN - 1 || np->speed == 10)) {
                netif_wake_queue (dev);
        }
}

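/*
 * TxStatus decoding used below: bit 4 (0x10) flags a transmit
 * underrun, bit 2 (0x04) a late collision, bit 3 (0x08) excessive
 * collisions, and the upper 16 bits carry the frame id of the failing
 * TFD.  Underruns get the heaviest recovery: raise TxStartThresh,
 * reset the Tx datapath, then rewrite TFDListPtr from old_tx.
 */
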
static void
tx_error (struct net_device *dev, int tx_status)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        int frame_id;
        int i;

        frame_id = (tx_status & 0xffff0000);
        printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
                dev->name, tx_status, frame_id);
        np->stats.tx_errors++;
        /* Transmit Underrun */
        if (tx_status & 0x10) {
                np->stats.tx_fifo_errors++;
                dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
                /* Transmit Underrun need to set TxReset, DMAReset, FIFOReset */
                dw16(ASICCtrl + 2,
                     TxReset | DMAReset | FIFOReset | NetworkReset);
                /* Wait for ResetBusy bit clear */
                for (i = 50; i > 0; i--) {
                        if (!(dr16(ASICCtrl + 2) & ResetBusy))
                                break;
                        mdelay (1);
                }
                rio_free_tx (dev, 1);
                /* Reset TFDListPtr */
                dw32(TFDListPtr0, np->tx_ring_dma +
                     np->old_tx * sizeof (struct netdev_desc));
                dw32(TFDListPtr1, 0);

                /* Let TxStartThresh stay default value */
        }
        /* Late Collision */
        if (tx_status & 0x04) {
                np->stats.tx_fifo_errors++;
                /* TxReset and clear FIFO */
                dw16(ASICCtrl + 2, TxReset | FIFOReset);
                /* Wait reset done */
                for (i = 50; i > 0; i--) {
                        if (!(dr16(ASICCtrl + 2) & ResetBusy))
                                break;
                        mdelay (1);
                }
                /* Let TxStartThresh stay default value */
        }
        /* Maximum Collisions */
#ifdef ETHER_STATS
        if (tx_status & 0x08)
                np->stats.collisions16++;
#else
        if (tx_status & 0x08)
                np->stats.collisions++;
#endif
        /* Restart the Tx */
        dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}

static int
receive_packet (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->cur_rx % RX_RING_SIZE;
        int cnt = 30;

        /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
        while (1) {
                struct netdev_desc *desc = &np->rx_ring[entry];
                int pkt_len;
                u64 frame_status;

                if (!(desc->status & cpu_to_le64(RFDDone)) ||
                    !(desc->status & cpu_to_le64(FrameStart)) ||
                    !(desc->status & cpu_to_le64(FrameEnd)))
                        break;

                /* Chip omits the CRC. */
                frame_status = le64_to_cpu(desc->status);
                pkt_len = frame_status & 0xffff;
                if (--cnt < 0)
                        break;
                /* Update rx error statistics, drop packet. */
                if (frame_status & RFS_Errors) {
                        np->stats.rx_errors++;
                        if (frame_status & (RxRuntFrame | RxLengthError))
                                np->stats.rx_length_errors++;
                        if (frame_status & RxFCSError)
                                np->stats.rx_crc_errors++;
                        if (frame_status & RxAlignmentError && np->speed != 1000)
                                np->stats.rx_frame_errors++;
                        if (frame_status & RxFIFOOverrun)
                                np->stats.rx_fifo_errors++;
                } else {
                        struct sk_buff *skb;

                        /* Small skbuffs for short packets */
                        if (pkt_len > copy_thresh) {
                                pci_unmap_single (np->pdev,
                                                  desc_to_dma(desc),
                                                  np->rx_buf_sz,
                                                  PCI_DMA_FROMDEVICE);
                                skb_put (skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
                                pci_dma_sync_single_for_cpu(np->pdev,
                                                            desc_to_dma(desc),
                                                            np->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
                                skb_copy_to_linear_data (skb,
                                                  np->rx_skbuff[entry]->data,
                                                  pkt_len);
                                skb_put (skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pdev,
                                                               desc_to_dma(desc),
                                                               np->rx_buf_sz,
                                                               PCI_DMA_FROMDEVICE);
                        }
                        /* skb is NULL if the copy-path allocation failed;
                           don't touch it in that case, just drop the frame. */
                        if (skb) {
                                skb->protocol = eth_type_trans (skb, dev);
#if 0
                                /* Checksum done by hw, but csum value unavailable. */
                                if (np->pdev->pci_rev_id >= 0x0c &&
                                    !(frame_status & (TCPError | UDPError | IPError))) {
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                }
#endif
                                netif_rx (skb);
                        }
                }
                entry = (entry + 1) % RX_RING_SIZE;
        }
        spin_lock(&np->rx_lock);
        np->cur_rx = entry;
        /* Re-allocate skbuffs to fill the descriptor ring */
        entry = np->old_rx;
        while (entry != np->cur_rx) {
                struct sk_buff *skb;
                /* Dropped packets don't need to re-allocate */
                if (np->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
                        if (skb == NULL) {
                                np->rx_ring[entry].fraginfo = 0;
                                printk (KERN_INFO
                                        "%s: receive_packet: "
                                        "Unable to re-allocate Rx skbuff.#%d\n",
                                        dev->name, entry);
                                break;
                        }
                        np->rx_skbuff[entry] = skb;
                        np->rx_ring[entry].fraginfo =
                            cpu_to_le64 (pci_map_single
                                         (np->pdev, skb->data, np->rx_buf_sz,
                                          PCI_DMA_FROMDEVICE));
                }
                np->rx_ring[entry].fraginfo |=
                    cpu_to_le64((u64)np->rx_buf_sz << 48);
                np->rx_ring[entry].status = 0;
                entry = (entry + 1) % RX_RING_SIZE;
        }
        np->old_rx = entry;
        spin_unlock(&np->rx_lock);
        return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        u16 macctrl;

        /* Link change event */
        if (int_status & LinkEvent) {
                if (mii_wait_link (dev, 10) == 0) {
                        printk (KERN_INFO "%s: Link up\n", dev->name);
                        if (np->phy_media)
                                mii_get_media_pcs (dev);
                        else
                                mii_get_media (dev);
                        if (np->speed == 1000)
                                np->tx_coalesce = tx_coalesce;
                        else
                                np->tx_coalesce = 1;
                        macctrl = 0;
                        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
                        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
                        macctrl |= (np->tx_flow) ?
                                TxFlowControlEnable : 0;
                        macctrl |= (np->rx_flow) ?
                                RxFlowControlEnable : 0;
                        dw16(MACCtrl, macctrl);
                        np->link_status = 1;
                        netif_carrier_on(dev);
                } else {
                        printk (KERN_INFO "%s: Link down\n", dev->name);
                        np->link_status = 0;
                        netif_carrier_off(dev);
                }
        }

        /* UpdateStats statistics registers */
        if (int_status & UpdateStats) {
                get_stats (dev);
        }

        /* PCI Error, a catastrophic error related to the bus interface
           occurs, set GlobalReset and HostReset to reset. */
        if (int_status & HostError) {
                printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
                        dev->name, int_status);
                dw16(ASICCtrl + 2, GlobalReset | HostReset);
                mdelay (500);
        }
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
        int i;
#endif
        unsigned int stat_reg;

        /* All statistics registers need to be acknowledged,
           else statistic overflow could cause problems */

        np->stats.rx_packets += dr32(FramesRcvOk);
        np->stats.tx_packets += dr32(FramesXmtOk);
        np->stats.rx_bytes += dr32(OctetRcvOk);
        np->stats.tx_bytes += dr32(OctetXmtOk);

        np->stats.multicast = dr32(McstFramesRcvdOk);
        np->stats.collisions += dr32(SingleColFrames)
                             +  dr32(MultiColFrames);

        /* detailed tx errors */
        stat_reg = dr16(FramesAbortXSColls);
        np->stats.tx_aborted_errors += stat_reg;
        np->stats.tx_errors += stat_reg;

        stat_reg = dr16(CarrierSenseErrors);
        np->stats.tx_carrier_errors += stat_reg;
        np->stats.tx_errors += stat_reg;

        /* Clear all other statistic registers. */
        dr32(McstOctetXmtOk);
        dr16(BcstFramesXmtdOk);
        dr32(McstFramesXmtdOk);
        dr16(BcstFramesRcvdOk);
        dr16(MacControlFramesRcvd);
        dr16(FrameTooLongErrors);
        dr16(InRangeLengthErrors);
        dr16(FramesCheckSeqErrors);
        dr16(FramesLostRxErrors);
        dr32(McstOctetXmtOk);
        dr32(BcstOctetXmtOk);
        dr32(McstFramesXmtdOk);
        dr32(FramesWDeferredXmt);
        dr32(LateCollisions);
        dr16(BcstFramesXmtdOk);
        dr16(MacControlFramesXmtd);
        dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
        for (i = 0x100; i <= 0x150; i += 4)
                dr32(i);
#endif
        dr16(TxJumboFrames);
        dr16(RxJumboFrames);
        dr16(TCPCheckSumErrors);
        dr16(UDPCheckSumErrors);
        dr16(IPCheckSumErrors);
        return &np->stats;
}

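/*
 * The discarded dr16()/dr32() calls in get_stats() above and
 * clear_stats() below are intentional: reading these statistics
 * registers is what acknowledges (and resets) the hardware counters,
 * preventing the overflow problem the comments warn about.
 */
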
static int
clear_stats (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
        int i;
#endif

        /* All statistics registers need to be acknowledged,
           else statistic overflow could cause problems */
        dr32(FramesRcvOk);
        dr32(FramesXmtOk);
        dr32(OctetRcvOk);
        dr32(OctetXmtOk);

        dr32(McstFramesRcvdOk);
        dr32(SingleColFrames);
        dr32(MultiColFrames);
        dr32(LateCollisions);
        /* detailed rx errors */
        dr16(FrameTooLongErrors);
        dr16(InRangeLengthErrors);
        dr16(FramesCheckSeqErrors);
        dr16(FramesLostRxErrors);

        /* detailed tx errors */
        dr16(FramesAbortXSColls);
        dr16(CarrierSenseErrors);

        /* Clear all other statistic registers. */
        dr32(McstOctetXmtOk);
        dr16(BcstFramesXmtdOk);
        dr32(McstFramesXmtdOk);
        dr16(BcstFramesRcvdOk);
        dr16(MacControlFramesRcvd);
        dr32(McstOctetXmtOk);
        dr32(BcstOctetXmtOk);
        dr32(McstFramesXmtdOk);
        dr32(FramesWDeferredXmt);
        dr16(BcstFramesXmtdOk);
        dr16(MacControlFramesXmtd);
        dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
        for (i = 0x100; i <= 0x150; i += 4)
                dr32(i);
#endif
        dr16(TxJumboFrames);
        dr16(RxJumboFrames);
        dr16(TCPCheckSumErrors);
        dr16(UDPCheckSumErrors);
        dr16(IPCheckSumErrors);
        return 0;
}


static int
change_mtu (struct net_device *dev, int new_mtu)
{
        struct netdev_private *np = netdev_priv(dev);
        int max = (np->jumbo) ? MAX_JUMBO : 1536;

        if ((new_mtu < 68) || (new_mtu > max)) {
                return -EINVAL;
        }

        dev->mtu = new_mtu;

        return 0;
}

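/*
 * Multicast filtering sketch: set_multicast() below hashes each
 * address with ether_crc_le() and uses the bit-reversed top 6 bits of
 * the CRC as an index into the 64-bit HashTable0/1 pair.  Worked
 * example: a CRC whose bits 31..26 are 100111b reverses to index
 * 111001b = 0x39, i.e. bit 25 of hash_table[1], the same slot pre-set
 * below for the flow-control address 01-80-C2-00-00-01.
 */
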
static void
set_multicast (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        u32 hash_table[2];
        u16 rx_mode = 0;

        hash_table[0] = hash_table[1] = 0;
        /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
        hash_table[1] |= 0x02000000;
        if (dev->flags & IFF_PROMISC) {
                /* Receive all frames promiscuously. */
                rx_mode = ReceiveAllFrames;
        } else if ((dev->flags & IFF_ALLMULTI) ||
                        (netdev_mc_count(dev) > multicast_filter_limit)) {
                /* Receive broadcast and multicast frames */
                rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
        } else if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
                /* Receive broadcast frames and multicast frames filtering
                   by Hashtable */
                rx_mode =
                    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
                netdev_for_each_mc_addr(ha, dev) {
                        int bit, index = 0;
                        int crc = ether_crc_le(ETH_ALEN, ha->addr);
                        /* The inverted high significant 6 bits of CRC are
                           used as an index to hashtable */
                        for (bit = 0; bit < 6; bit++)
                                if (crc & (1 << (31 - bit)))
                                        index |= (1 << bit);
                        hash_table[index / 32] |= (1 << (index % 32));
                }
        } else {
                rx_mode = ReceiveBroadcast | ReceiveUnicast;
        }
        if (np->vlan) {
                /* ReceiveVLANMatch field in ReceiveMode */
                rx_mode |= ReceiveVLANMatch;
        }

        dw32(HashTable0, hash_table[0]);
        dw32(HashTable1, hash_table[1]);
        dw16(ReceiveMode, rx_mode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct netdev_private *np = netdev_priv(dev);

        strlcpy(info->driver, "dl2k", sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);

        if (np->phy_media) {
                /* fiber device */
                cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
                cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
                cmd->port = PORT_FIBRE;
                cmd->transceiver = XCVR_INTERNAL;
        } else {
                /* copper device */
                cmd->supported = SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
                        SUPPORTED_Autoneg | SUPPORTED_MII;
                cmd->advertising = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
                        ADVERTISED_Autoneg | ADVERTISED_MII;
                cmd->port = PORT_MII;
                cmd->transceiver = XCVR_INTERNAL;
        }
        if (np->link_status) {
                ethtool_cmd_speed_set(cmd, np->speed);
                cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        } else {
                ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }
        if (np->an_enable)
                cmd->autoneg = AUTONEG_ENABLE;
        else
                cmd->autoneg = AUTONEG_DISABLE;

        cmd->phy_address = np->phy_addr;
        return 0;
}

static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);

        netif_carrier_off(dev);
        if (cmd->autoneg == AUTONEG_ENABLE) {
                if (np->an_enable)
                        return 0;
                np->an_enable = 1;
                mii_set_media(dev);
                return 0;
        } else {
                np->an_enable = 0;
                if (np->speed == 1000) {
                        ethtool_cmd_speed_set(cmd, SPEED_100);
                        cmd->duplex = DUPLEX_FULL;
                        printk(KERN_INFO "Warning: auto-negotiation cannot be disabled at 1000 Mbps; falling back to manual 100 Mbps, full duplex.\n");
                }
                switch (ethtool_cmd_speed(cmd)) {
                case SPEED_10:
                        np->speed = 10;
                        np->full_duplex = (cmd->duplex == DUPLEX_FULL);
                        break;
                case SPEED_100:
                        np->speed = 100;
                        np->full_duplex = (cmd->duplex == DUPLEX_FULL);
                        break;
                case SPEED_1000: /* not supported */
                default:
                        return -EINVAL;
                }
                mii_set_media(dev);
        }
        return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = rio_get_drvinfo,
        .get_settings = rio_get_settings,
        .set_settings = rio_set_settings,
        .get_link = rio_get_link,
};

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
        int phy_addr;
        struct netdev_private *np = netdev_priv(dev);
        struct mii_ioctl_data *miidata = if_mii(rq);

        phy_addr = np->phy_addr;
        switch (cmd) {
        case SIOCGMIIPHY:
                miidata->phy_id = phy_addr;
                break;
        case SIOCGMIIREG:
                miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
                break;
        case SIOCSMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the EEPROM to avoid failures on
   some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
        void __iomem *ioaddr = np->eeprom_addr;
        int i = 1000;

        dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
        while (i-- > 0) {
                if (!(dr16(EepromCtrl) & EEP_BUSY))
                        return dr16(EepromData);
        }
        return 0;
}

enum phy_ctrl_bits {
        MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
        MII_DUPLEX = 0x08,
};

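/*
 * The helpers below bit-bang MDIO frames through PhyCtrl.  A read
 * transaction is 32 preamble 1-bits, a 14-bit command of
 * ST+OP+PHYAD+REGAD (ST,OP = 0110b for read), one turnaround bit
 * driven by the PHY, then 16 data bits.  For example, reading BMSR
 * (register 1) of the PHY at address 1 sends the command word
 * (0x06 << 10) | (1 << 5) | 1 = 0x1821.
 */
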
#define mii_delay() dr8(PhyCtrl)
static void
mii_sendbit (struct net_device *dev, u32 data)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;

        data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
        dw8(PhyCtrl, data);
        mii_delay ();
        dw8(PhyCtrl, data | MII_CLK);
        mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        u8 data;

        data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
        dw8(PhyCtrl, data);
        mii_delay ();
        dw8(PhyCtrl, data | MII_CLK);
        mii_delay ();
        return (dr8(PhyCtrl) >> 1) & 1;
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
        int i;

        for (i = len - 1; i >= 0; i--) {
                mii_sendbit (dev, data & (1 << i));
        }
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
        u32 cmd;
        int i;
        u32 retval = 0;

        /* Preamble */
        mii_send_bits (dev, 0xffffffff, 32);
        /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
        /* ST,OP = 0110'b for read operation */
        cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
        mii_send_bits (dev, cmd, 14);
        /* Turnaround */
        if (mii_getbit (dev))
                goto err_out;
        /* Read data */
        for (i = 0; i < 16; i++) {
                retval |= mii_getbit (dev);
                retval <<= 1;
        }
        /* End cycle */
        mii_getbit (dev);
        return (retval >> 1) & 0xffff;

err_out:
        return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
        u32 cmd;

        /* Preamble */
        mii_send_bits (dev, 0xffffffff, 32);
        /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
        /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
        cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
        mii_send_bits (dev, cmd, 32);
        /* End cycle */
        mii_getbit (dev);
        return 0;
}

static int
mii_wait_link (struct net_device *dev, int wait)
{
        __u16 bmsr;
        int phy_addr;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        do {
                bmsr = mii_read (dev, phy_addr, MII_BMSR);
                if (bmsr & BMSR_LSTATUS)
                        return 0;
                mdelay (1);
        } while (--wait > 0);
        return -1;
}
1398static int
1399mii_get_media (struct net_device *dev)
1400{
1401        __u16 negotiate;
1402        __u16 bmsr;
1403        __u16 mscr;
1404        __u16 mssr;
1405        int phy_addr;
1406        struct netdev_private *np;
1407
1408        np = netdev_priv(dev);
1409        phy_addr = np->phy_addr;
1410
1411        bmsr = mii_read (dev, phy_addr, MII_BMSR);
1412        if (np->an_enable) {
1413                if (!(bmsr & BMSR_ANEGCOMPLETE)) {
1414                        /* Auto-Negotiation not completed */
1415                        return -1;
1416                }
1417                negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
1418                        mii_read (dev, phy_addr, MII_LPA);
1419                mscr = mii_read (dev, phy_addr, MII_CTRL1000);
1420                mssr = mii_read (dev, phy_addr, MII_STAT1000);
1421                if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
1422                        np->speed = 1000;
1423                        np->full_duplex = 1;
1424                        printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1425                } else if ((mscr & ADVERTISE_1000HALF) && (mssr & LPA_1000HALF)) {
1426                        np->speed = 1000;
1427                        np->full_duplex = 0;
1428                        printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
1429                } else if (negotiate & ADVERTISE_100FULL) {
1430                        np->speed = 100;
1431                        np->full_duplex = 1;
1432                        printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
1433                } else if (negotiate & ADVERTISE_100HALF) {
1434                        np->speed = 100;
1435                        np->full_duplex = 0;
1436                        printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
1437                } else if (negotiate & ADVERTISE_10FULL) {
1438                        np->speed = 10;
1439                        np->full_duplex = 1;
1440                        printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
1441                } else if (negotiate & ADVERTISE_10HALF) {
1442                        np->speed = 10;
1443                        np->full_duplex = 0;
1444                        printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
1445                }
1446                if (negotiate & ADVERTISE_PAUSE_CAP) {
1447                        np->tx_flow &= 1;
1448                        np->rx_flow &= 1;
1449                } else if (negotiate & ADVERTISE_PAUSE_ASYM) {
1450                        np->tx_flow = 0;
1451                        np->rx_flow &= 1;
1452                }
1453                /* else tx_flow, rx_flow = user select  */
1454        } else {
1455                __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
1456                switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
1457                case BMCR_SPEED1000:
1458                        printk (KERN_INFO "Operating at 1000 Mbps, ");
1459                        break;
1460                case BMCR_SPEED100:
1461                        printk (KERN_INFO "Operating at 100 Mbps, ");
1462                        break;
1463                case 0:
1464                        printk (KERN_INFO "Operating at 10 Mbps, ");
1465                }
1466                if (bmcr & BMCR_FULLDPLX) {
1467                        printk (KERN_CONT "Full duplex\n");
1468                } else {
1469                        printk (KERN_CONT "Half duplex\n");
1470                }
1471        }
1472        if (np->tx_flow)
1473                printk(KERN_INFO "Enable Tx Flow Control\n");
1474        else
1475                printk(KERN_INFO "Disable Tx Flow Control\n");
1476        if (np->rx_flow)
1477                printk(KERN_INFO "Enable Rx Flow Control\n");
1478        else
1479                printk(KERN_INFO "Disable Rx Flow Control\n");
1480
1481        return 0;
1482}
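/*
 * Editor's note on the pause handling above: tx_flow/rx_flow are used as
 * booleans, so "&= 1" simply preserves the user's module-parameter choice.
 * Flow control is therefore enabled only when the user asked for it AND
 * the link partner advertised the matching PAUSE bit - a simplified form
 * of the IEEE 802.3 Annex 28B resolution, which would also consult the
 * local ASYM/SYM advertisement.
 */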
1483
1484static int
1485mii_set_media (struct net_device *dev)
1486{
1487        __u16 pscr;
1488        __u16 bmcr;
1489        __u16 bmsr;
1490        __u16 anar;
1491        int phy_addr;
1492        struct netdev_private *np;
1493        np = netdev_priv(dev);
1494        phy_addr = np->phy_addr;
1495
1496        /* Does user set speed? */
1497        if (np->an_enable) {
1498                /* Advertise capabilities */
1499                bmsr = mii_read (dev, phy_addr, MII_BMSR);
1500                anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
1501                        ~(ADVERTISE_100FULL | ADVERTISE_10FULL |
1502                          ADVERTISE_100HALF | ADVERTISE_10HALF |
1503                          ADVERTISE_100BASE4);
1504                if (bmsr & BMSR_100FULL)
1505                        anar |= ADVERTISE_100FULL;
1506                if (bmsr & BMSR_100HALF)
1507                        anar |= ADVERTISE_100HALF;
1508                if (bmsr & BMSR_100BASE4)
1509                        anar |= ADVERTISE_100BASE4;
1510                if (bmsr & BMSR_10FULL)
1511                        anar |= ADVERTISE_10FULL;
1512                if (bmsr & BMSR_10HALF)
1513                        anar |= ADVERTISE_10HALF;
1514                anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1515                mii_write (dev, phy_addr, MII_ADVERTISE, anar);
1516
1517                /* Enable Auto crossover */
1518                pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
1519                pscr |= 3 << 5; /* 11'b */
1520                mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
1521
1522                /* Soft reset PHY */
1523                mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
1524                bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
1525                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1526                mdelay(1);
1527        } else {
1528                /* Force speed setting */
1529                /* 1) Disable Auto crossover */
1530                pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
1531                pscr &= ~(3 << 5);
1532                mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
1533
1534                /* 2) PHY Reset */
1535                bmcr = mii_read (dev, phy_addr, MII_BMCR);
1536                bmcr |= BMCR_RESET;
1537                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1538
1539                /* 3) Power Down */
1540                bmcr = 0x1940;  /* BMCR_ANENABLE|BMCR_PDOWN|BMCR_FULLDPLX|BMCR_SPEED1000 */
1541                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1542                mdelay (100);   /* give the PHY time to power down */
1543
1544                /* 4) Advertise nothing */
1545                mii_write (dev, phy_addr, MII_ADVERTISE, 0);
1546
1547                /* 5) Set media and Power Up */
1548                bmcr = BMCR_PDOWN;
1549                if (np->speed == 100) {
1550                        bmcr |= BMCR_SPEED100;
1551                        printk (KERN_INFO "Manual 100 Mbps, ");
1552                } else if (np->speed == 10) {
1553                        printk (KERN_INFO "Manual 10 Mbps, ");
1554                }
1555                if (np->full_duplex) {
1556                        bmcr |= BMCR_FULLDPLX;
1557                        printk (KERN_CONT "Full duplex\n");
1558                } else {
1559                        printk (KERN_CONT "Half duplex\n");
1560                }
1561#if 0
1562                /* Set 1000BaseT Master/Slave setting */
1563                mscr = mii_read (dev, phy_addr, MII_CTRL1000);
1564                mscr |= MII_MSCR_CFG_ENABLE;
1565                mscr &= ~MII_MSCR_CFG_VALUE;
1566#endif
1567                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1568                mdelay(10);
1569        }
1570        return 0;
1571}
1572
1573static int
1574mii_get_media_pcs (struct net_device *dev)
1575{
1576        __u16 negotiate;
1577        __u16 bmsr;
1578        int phy_addr;
1579        struct netdev_private *np;
1580
1581        np = netdev_priv(dev);
1582        phy_addr = np->phy_addr;
1583
1584        bmsr = mii_read (dev, phy_addr, PCS_BMSR);
1585        if (np->an_enable) {
1586                if (!(bmsr & BMSR_ANEGCOMPLETE)) {
1587                        /* Auto-Negotiation not completed */
1588                        return -1;
1589                }
1590                negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
1591                        mii_read (dev, phy_addr, PCS_ANLPAR);
1592                np->speed = 1000;
1593                if (negotiate & PCS_ANAR_FULL_DUPLEX) {
1594                        printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1595                        np->full_duplex = 1;
1596                } else {
1597                        printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
1598                        np->full_duplex = 0;
1599                }
1600                if (negotiate & PCS_ANAR_PAUSE) {
1601                        np->tx_flow &= 1;
1602                        np->rx_flow &= 1;
1603                } else if (negotiate & PCS_ANAR_ASYMMETRIC) {
1604                        np->tx_flow = 0;
1605                        np->rx_flow &= 1;
1606                }
1607                /* else tx_flow, rx_flow = user select  */
1608        } else {
1609                __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
1610                printk (KERN_INFO "Operating at 1000 Mbps, ");
1611                if (bmcr & BMCR_FULLDPLX) {
1612                        printk (KERN_CONT "Full duplex\n");
1613                } else {
1614                        printk (KERN_CONT "Half duplex\n");
1615                }
1616        }
1617        if (np->tx_flow)
1618                printk(KERN_INFO "Enable Tx Flow Control\n");
1619        else
1620                printk(KERN_INFO "Disable Tx Flow Control\n");
1621        if (np->rx_flow)
1622                printk(KERN_INFO "Enable Rx Flow Control\n");
1623        else
1624                printk(KERN_INFO "Disable Rx Flow Control\n");
1625
1626        return 0;
1627}
1628
1629static int
1630mii_set_media_pcs (struct net_device *dev)
1631{
1632        __u16 bmcr;
1633        __u16 esr;
1634        __u16 anar;
1635        int phy_addr;
1636        struct netdev_private *np;
1637        np = netdev_priv(dev);
1638        phy_addr = np->phy_addr;
1639
1640        /* Auto-Negotiation? */
1641        if (np->an_enable) {
1642                /* Advertise capabilities */
1643                esr = mii_read (dev, phy_addr, PCS_ESR);
1644        anar = mii_read (dev, phy_addr, PCS_ANAR) &
1645                        ~PCS_ANAR_HALF_DUPLEX &
1646                        ~PCS_ANAR_FULL_DUPLEX;
1647                if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
1648                        anar |= PCS_ANAR_HALF_DUPLEX;
1649                if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
1650                        anar |= PCS_ANAR_FULL_DUPLEX;
1651                anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
1652                mii_write (dev, phy_addr, PCS_ANAR, anar);
1653
1654                /* Soft reset PHY */
1655                mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
1656                bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
1657                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1658                mdelay(1);
1659        } else {
1660                /* Force speed setting */
1661                /* PHY Reset */
1662                bmcr = BMCR_RESET;
1663                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1664                mdelay(10);
1665                if (np->full_duplex) {
1666                        bmcr = BMCR_FULLDPLX;
1667                        printk (KERN_INFO "Manual full duplex\n");
1668                } else {
1669                        bmcr = 0;
1670                        printk (KERN_INFO "Manual half duplex\n");
1671                }
1672                mii_write (dev, phy_addr, MII_BMCR, bmcr);
1673                mdelay(10);
1674
1675                /*  Advertise nothing */
1676                mii_write (dev, phy_addr, PCS_ANAR, 0);
1677        }
1678        return 0;
1679}
1680
1681
1682static int
1683rio_close (struct net_device *dev)
1684{
1685        struct netdev_private *np = netdev_priv(dev);
1686        void __iomem *ioaddr = np->ioaddr;
1687
1688        struct pci_dev *pdev = np->pdev;
1689        struct sk_buff *skb;
1690        int i;
1691
1692        netif_stop_queue (dev);
1693
1694        /* Disable interrupts */
1695        dw16(IntEnable, 0);
1696
1697        /* Stop Tx and Rx logic */
1698        dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
1699
1700        free_irq(pdev->irq, dev);
1701        del_timer_sync (&np->timer);
1702
1703        /* Free all the skbuffs in the queue. */
1704        for (i = 0; i < RX_RING_SIZE; i++) {
1705                skb = np->rx_skbuff[i];
1706                if (skb) {
1707                        pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
1708                                         skb->len, PCI_DMA_FROMDEVICE);
1709                        dev_kfree_skb (skb);
1710                        np->rx_skbuff[i] = NULL;
1711                }
1712                np->rx_ring[i].status = 0;
1713                np->rx_ring[i].fraginfo = 0;
1714        }
1715        for (i = 0; i < TX_RING_SIZE; i++) {
1716                skb = np->tx_skbuff[i];
1717                if (skb) {
1718                        pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
1719                                         skb->len, PCI_DMA_TODEVICE);
1720                        dev_kfree_skb (skb);
1721                        np->tx_skbuff[i] = NULL;
1722                }
1723        }
1724
1725        return 0;
1726}
1727
1728static void
1729rio_remove1 (struct pci_dev *pdev)
1730{
1731        struct net_device *dev = pci_get_drvdata (pdev);
1732
1733        if (dev) {
1734                struct netdev_private *np = netdev_priv(dev);
1735
1736                unregister_netdev (dev);
1737                pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
1738                                     np->rx_ring_dma);
1739                pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1740                                     np->tx_ring_dma);
1741#ifdef MEM_MAPPING
1742                pci_iounmap(pdev, np->ioaddr);
1743#endif
1744                pci_iounmap(pdev, np->eeprom_addr);
1745                free_netdev (dev);
1746                pci_release_regions (pdev);
1747                pci_disable_device (pdev);
1748        }
1749        pci_set_drvdata (pdev, NULL);
1750}
1751
1752static struct pci_driver rio_driver = {
1753        .name           = "dl2k",
1754        .id_table       = rio_pci_tbl,
1755        .probe          = rio_probe1,
1756        .remove         = rio_remove1,
1757};
1758
1759module_pci_driver(rio_driver);
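/*
 * module_pci_driver() expands to the module_init()/module_exit() pair
 * that just call pci_register_driver()/pci_unregister_driver() on
 * rio_driver, replacing the hand-written init/exit stubs drivers of this
 * vintage used to carry.
 */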
1760/*
1761
1762Build: this driver is built through the normal kernel build system
1763(kbuild); the historic standalone "gcc -D__KERNEL__ ..." compile
1764command from the 2.4 days no longer applies.
1765
1766Read Documentation/networking/dl2k.txt for details.
1767
1768*/
1769
1770