linux/drivers/net/ethernet/via/via-velocity.c
   1/*
   2 * This code is derived from the VIA reference driver (copyright message
   3 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
   4 * addition to the Linux kernel.
   5 *
   6 * The code has been merged into one source file, cleaned up to follow
   7 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
   8 * for 64bit hardware platforms.
   9 *
  10 * TODO
  11 *      rx_copybreak/alignment
  12 *      More testing
  13 *
  14 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
  15 * Additional fixes and clean up: Francois Romieu
  16 *
  17 * This source has not been verified for use in safety critical systems.
  18 *
  19 * Please direct queries about the revamped driver to the linux-kernel
  20 * list not VIA.
  21 *
  22 * Original code:
  23 *
  24 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
  25 * All rights reserved.
  26 *
  27 * This software may be redistributed and/or modified under
  28 * the terms of the GNU General Public License as published by the Free
  29 * Software Foundation; either version 2 of the License, or
  30 * any later version.
  31 *
  32 * This program is distributed in the hope that it will be useful, but
  33 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  34 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  35 * for more details.
  36 *
  37 * Author: Chuang Liang-Shing, AJ Jiang
  38 *
  39 * Date: Jan 24, 2003
  40 *
  41 * MODULE_LICENSE("GPL");
  42 *
  43 */
  44
  45#include <linux/module.h>
  46#include <linux/types.h>
  47#include <linux/bitops.h>
  48#include <linux/init.h>
  49#include <linux/mm.h>
  50#include <linux/errno.h>
  51#include <linux/ioport.h>
  52#include <linux/pci.h>
  53#include <linux/kernel.h>
  54#include <linux/netdevice.h>
  55#include <linux/etherdevice.h>
  56#include <linux/skbuff.h>
  57#include <linux/delay.h>
  58#include <linux/timer.h>
  59#include <linux/slab.h>
  60#include <linux/interrupt.h>
  61#include <linux/string.h>
  62#include <linux/wait.h>
  63#include <linux/io.h>
  64#include <linux/if.h>
  65#include <linux/uaccess.h>
  66#include <linux/proc_fs.h>
  67#include <linux/inetdevice.h>
  68#include <linux/reboot.h>
  69#include <linux/ethtool.h>
  70#include <linux/mii.h>
  71#include <linux/in.h>
  72#include <linux/if_arp.h>
  73#include <linux/if_vlan.h>
  74#include <linux/ip.h>
  75#include <linux/tcp.h>
  76#include <linux/udp.h>
  77#include <linux/crc-ccitt.h>
  78#include <linux/crc32.h>
  79
  80#include "via-velocity.h"
  81
  82
  83static int velocity_nics;
  84static int msglevel = MSG_LEVEL_INFO;
  85
  86/**
  87 *      mac_get_cam_mask        -       Read a CAM mask
  88 *      @regs: register block for this velocity
  89 *      @mask: buffer to store mask
  90 *
  91 *      Fetch the mask bits of the selected CAM and store them into the
  92 *      provided mask buffer.
  93 */
  94static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
  95{
  96        int i;
  97
  98        /* Select CAM mask */
  99        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 100
 101        writeb(0, &regs->CAMADDR);
 102
 103        /* read mask */
 104        for (i = 0; i < 8; i++)
 105                *mask++ = readb(&(regs->MARCAM[i]));
 106
 107        /* disable CAMEN */
 108        writeb(0, &regs->CAMADDR);
 109
 110        /* Select mar */
 111        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 112}
 113
 114/**
 115 *      mac_set_cam_mask        -       Set a CAM mask
 116 *      @regs: register block for this velocity
 117 *      @mask: CAM mask to load
 118 *
 119 *      Store a new mask into a CAM
 120 */
 121static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 122{
 123        int i;
 124        /* Select CAM mask */
 125        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 126
 127        writeb(CAMADDR_CAMEN, &regs->CAMADDR);
 128
 129        for (i = 0; i < 8; i++)
 130                writeb(*mask++, &(regs->MARCAM[i]));
 131
 132        /* disable CAMEN */
 133        writeb(0, &regs->CAMADDR);
 134
 135        /* Select mar */
 136        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 137}
 138
 139static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 140{
 141        int i;
 142        /* Select CAM mask */
 143        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 144
 145        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
 146
 147        for (i = 0; i < 8; i++)
 148                writeb(*mask++, &(regs->MARCAM[i]));
 149
 150        /* disable CAMEN */
 151        writeb(0, &regs->CAMADDR);
 152
 153        /* Select mar */
 154        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 155}
 156
 157/**
 158 *      mac_set_cam     -       set CAM data
 159 *      @regs: register block of this velocity
  160 *      @idx: CAM index
  161 *      @addr: 2 or 6 bytes of CAM data
  162 *
  163 *      Load an address or VLAN tag into a CAM
 164 */
 165static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
 166{
 167        int i;
 168
  169        /* Select CAM data page */
 170        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 171
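        /* The CAM has 64 entries; wrap the index into range */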
 172        idx &= (64 - 1);
 173
 174        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
 175
 176        for (i = 0; i < 6; i++)
 177                writeb(*addr++, &(regs->MARCAM[i]));
 178
 179        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
 180
 181        udelay(10);
 182
 183        writeb(0, &regs->CAMADDR);
 184
 185        /* Select mar */
 186        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 187}
 188
 189static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
 190                             const u8 *addr)
 191{
 192
  193        /* Select CAM data page */
 194        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 195
 196        idx &= (64 - 1);
 197
 198        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
 199        writew(*((u16 *) addr), &regs->MARCAM[0]);
 200
 201        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
 202
 203        udelay(10);
 204
 205        writeb(0, &regs->CAMADDR);
 206
 207        /* Select mar */
 208        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 209}
 210
 211
 212/**
 213 *      mac_wol_reset   -       reset WOL after exiting low power
 214 *      @regs: register block of this velocity
 215 *
  216 *      Called after we drop out of Wake-on-LAN mode in order to
  217 *      reset the Wake-on-LAN features. This function doesn't restore
  218 *      the rest of the logic after a sleep/wakeup cycle.
 219 */
 220static void mac_wol_reset(struct mac_regs __iomem *regs)
 221{
 222
 223        /* Turn off SWPTAG right after leaving power mode */
 224        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
 225        /* clear sticky bits */
 226        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
 227
 228        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
 229        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
 230        /* disable force PME-enable */
 231        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
 232        /* disable power-event config bit */
 233        writew(0xFFFF, &regs->WOLCRClr);
 234        /* clear power status */
 235        writew(0xFFFF, &regs->WOLSRClr);
 236}
 237
 238static const struct ethtool_ops velocity_ethtool_ops;
 239
 240/*
 241    Define module options
 242*/
 243
 244MODULE_AUTHOR("VIA Networking Technologies, Inc.");
 245MODULE_LICENSE("GPL");
 246MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
 247
 248#define VELOCITY_PARAM(N, D) \
 249        static int N[MAX_UNITS] = OPTION_DEFAULT;\
 250        module_param_array(N, int, NULL, 0); \
 251        MODULE_PARM_DESC(N, D);
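
/*
 * For reference, VELOCITY_PARAM(RxDescriptors, "Number of receive
 * descriptors") expands to roughly:
 *
 *	static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(RxDescriptors, int, NULL, 0);
 *	MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * so each option below is a per-adapter array settable at load time,
 * e.g. "modprobe via-velocity RxDescriptors=128,64" for two NICs.
 */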
 252
 253#define RX_DESC_MIN     64
 254#define RX_DESC_MAX     255
 255#define RX_DESC_DEF     64
 256VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
 257
 258#define TX_DESC_MIN     16
 259#define TX_DESC_MAX     256
 260#define TX_DESC_DEF     64
 261VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
 262
 263#define RX_THRESH_MIN   0
 264#define RX_THRESH_MAX   3
 265#define RX_THRESH_DEF   0
 266/* rx_thresh[] is used for controlling the receive fifo threshold.
  267   0: the RX FIFO threshold is 128 bytes.
  268   1: the RX FIFO threshold is 512 bytes.
  269   2: the RX FIFO threshold is 1024 bytes.
  270   3: the RX FIFO operates in store & forward mode.
 271*/
 272VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 273
 274#define DMA_LENGTH_MIN  0
 275#define DMA_LENGTH_MAX  7
 276#define DMA_LENGTH_DEF  6
 277
 278/* DMA_length[] is used for controlling the DMA length
 279   0: 8 DWORDs
 280   1: 16 DWORDs
 281   2: 32 DWORDs
 282   3: 64 DWORDs
 283   4: 128 DWORDs
 284   5: 256 DWORDs
  285   6: SF (flush till empty)
  286   7: SF (flush till empty)
 287*/
 288VELOCITY_PARAM(DMA_length, "DMA length");
 289
 290#define IP_ALIG_DEF     0
  291/* IP_byte_align[] controls DWORD alignment of the IP header.
  292   0: the IP header will not be DWORD byte aligned. (Default)
  293   1: the IP header will be DWORD byte aligned.
  294      In some environments the IP header must be DWORD byte aligned
  295      or the packet will be dropped when it is received. (e.g. IPVS)
 296*/
 297VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
 298
 299#define FLOW_CNTL_DEF   1
 300#define FLOW_CNTL_MIN   1
 301#define FLOW_CNTL_MAX   5
 302
  303/* flow_control[] is used for setting the flow control ability of the NIC.
  304   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
 305   2: enable TX flow control.
 306   3: enable RX flow control.
 307   4: enable RX/TX flow control.
 308   5: disable
 309*/
 310VELOCITY_PARAM(flow_control, "Enable flow control ability");
 311
 312#define MED_LNK_DEF 0
 313#define MED_LNK_MIN 0
 314#define MED_LNK_MAX 5
 315/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
  316   0: autonegotiation of both speed and duplex mode
  317   1: 100Mbps half duplex mode
  318   2: 100Mbps full duplex mode
  319   3: 10Mbps half duplex mode
  320   4: 10Mbps full duplex mode
  321   5: 1000Mbps full duplex mode
  322
  323   Note:
  324   if the EEPROM has been set to a forced mode, this option is ignored
  325   by the driver.
 326*/
 327VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
 328
 329#define VAL_PKT_LEN_DEF     0
  330/* ValPktLen[] controls handling of frames with an invalid layer 2 length.
  331   0: Receive frames with an invalid layer 2 length (Default)
  332   1: Drop frames with an invalid layer 2 length
  333*/
  334VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");
 335
 336#define WOL_OPT_DEF     0
 337#define WOL_OPT_MIN     0
 338#define WOL_OPT_MAX     7
  339/* wol_opts[] is used for controlling Wake-on-LAN behavior.
  340   0: Wake up on receipt of a magic packet. (Default)
  341   1: Wake up when the link status changes.
  342   2: Wake up on receipt of an ARP packet.
  343   4: Wake up on receipt of any unicast packet.
  344   These values can be summed to enable more than one option.
 345*/
 346VELOCITY_PARAM(wol_opts, "Wake On Lan options");
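/*
 * Example: wol_opts=6 (2 + 4) would wake the machine on either an ARP
 * packet or any unicast packet, since the non-zero values are additive
 * bit flags.
 */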
 347
 348static int rx_copybreak = 200;
 349module_param(rx_copybreak, int, 0644);
 350MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
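/*
 * The copy path itself lives later in the driver; the idea is that
 * frames shorter than rx_copybreak bytes are copied into a small fresh
 * skb so the large 64 byte aligned receive buffer can stay in the ring.
 */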
 351
 352/*
 353 *      Internal board variants. At the moment we have only one
 354 */
 355static struct velocity_info_tbl chip_info_table[] = {
 356        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
 357        { }
 358};
 359
 360/*
 361 *      Describe the PCI device identifiers that we support in this
 362 *      device driver. Used for hotplug autoloading.
 363 */
 364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
 365        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
 366        { }
 367};
 368
 369MODULE_DEVICE_TABLE(pci, velocity_id_table);
 370
 371/**
 372 *      get_chip_name   -       identifier to name
  373 *      @chip_id: chip identifier
  374 *
  375 *      Given a chip identifier return a suitable description. Returns
  376 *      a pointer to a static string valid while the driver is loaded.
 377 */
 378static const char __devinit *get_chip_name(enum chip_type chip_id)
 379{
 380        int i;
 381        for (i = 0; chip_info_table[i].name != NULL; i++)
 382                if (chip_info_table[i].chip_id == chip_id)
 383                        break;
 384        return chip_info_table[i].name;
 385}
 386
 387/**
 388 *      velocity_remove1        -       device unplug
 389 *      @pdev: PCI device being removed
 390 *
 391 *      Device unload callback. Called on an unplug or on module
 392 *      unload for each active device that is present. Disconnects
  393 *      the device from the network layer and frees all the resources.
 394 */
 395static void __devexit velocity_remove1(struct pci_dev *pdev)
 396{
 397        struct net_device *dev = pci_get_drvdata(pdev);
 398        struct velocity_info *vptr = netdev_priv(dev);
 399
 400        unregister_netdev(dev);
 401        iounmap(vptr->mac_regs);
 402        pci_release_regions(pdev);
 403        pci_disable_device(pdev);
 404        pci_set_drvdata(pdev, NULL);
 405        free_netdev(dev);
 406
 407        velocity_nics--;
 408}
 409
 410/**
 411 *      velocity_set_int_opt    -       parser for integer options
 412 *      @opt: pointer to option value
 413 *      @val: value the user requested (or -1 for default)
 414 *      @min: lowest value allowed
 415 *      @max: highest value allowed
 416 *      @def: default value
 417 *      @name: property name
  418 *      @devname: device name
 419 *
 420 *      Set an integer property in the module options. This function does
 421 *      all the verification and checking as well as reporting so that
 422 *      we don't duplicate code for each option.
 423 */
 424static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
 425{
 426        if (val == -1)
 427                *opt = def;
 428        else if (val < min || val > max) {
 429                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
 430                                        devname, name, min, max);
 431                *opt = def;
 432        } else {
 433                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
 434                                        devname, name, val);
 435                *opt = val;
 436        }
 437}
 438
 439/**
 440 *      velocity_set_bool_opt   -       parser for boolean options
 441 *      @opt: pointer to option value
 442 *      @val: value the user requested (or -1 for default)
 443 *      @def: default value (yes/no)
 444 *      @flag: numeric value to set for true.
 445 *      @name: property name
  446 *      @devname: device name
 447 *
 448 *      Set a boolean property in the module options. This function does
 449 *      all the verification and checking as well as reporting so that
 450 *      we don't duplicate code for each option.
 451 */
 452static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
 453{
 454        (*opt) &= (~flag);
 455        if (val == -1)
 456                *opt |= (def ? flag : 0);
 457        else if (val < 0 || val > 1) {
 458                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
 459                        devname, name);
 460                *opt |= (def ? flag : 0);
 461        } else {
 462                printk(KERN_INFO "%s: set parameter %s to %s\n",
 463                        devname, name, val ? "TRUE" : "FALSE");
 464                *opt |= (val ? flag : 0);
 465        }
 466}
 467
 468/**
 469 *      velocity_get_options    -       set options on device
 470 *      @opts: option structure for the device
 471 *      @index: index of option to use in module options array
 472 *      @devname: device name
 473 *
 474 *      Turn the module and command options into a single structure
 475 *      for the current device
 476 */
 477static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
 478{
 479
 480        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
 481        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
 482        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
 483        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
 484
 485        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
 486        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
 487        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
 488        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
 489        velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
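        /*
         * The hardware is handed RX descriptors in blocks of four, so
         * round the ring size down to a multiple of 4 by clearing the
         * low two bits (e.g. a request for 65 descriptors becomes 64).
         */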
 490        opts->numrx = (opts->numrx & ~3);
 491}
 492
 493/**
 494 *      velocity_init_cam_filter        -       initialise CAM
 495 *      @vptr: velocity to program
 496 *
 497 *      Initialize the content addressable memory used for filters. Load
  498 *      appropriately according to the presence of VLANs.
 499 */
 500static void velocity_init_cam_filter(struct velocity_info *vptr)
 501{
 502        struct mac_regs __iomem *regs = vptr->mac_regs;
 503        unsigned int vid, i = 0;
 504
 505        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
 506        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
 507        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
 508
 509        /* Disable all CAMs */
 510        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
 511        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
 512        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 513        mac_set_cam_mask(regs, vptr->mCAMmask);
 514
 515        /* Enable VCAMs */
 516        for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
 517                mac_set_vlan_cam(regs, i, (u8 *) &vid);
 518                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
 519                if (++i >= VCAM_SIZE)
 520                        break;
 521        }
 522        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 523}
 524
 525static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 526{
 527        struct velocity_info *vptr = netdev_priv(dev);
 528
 529        spin_lock_irq(&vptr->lock);
 530        set_bit(vid, vptr->active_vlans);
 531        velocity_init_cam_filter(vptr);
 532        spin_unlock_irq(&vptr->lock);
 533        return 0;
 534}
 535
 536static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 537{
 538        struct velocity_info *vptr = netdev_priv(dev);
 539
 540        spin_lock_irq(&vptr->lock);
 541        clear_bit(vid, vptr->active_vlans);
 542        velocity_init_cam_filter(vptr);
 543        spin_unlock_irq(&vptr->lock);
 544        return 0;
 545}
 546
 547static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
 548{
 549        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
 550}
 551
 552/**
 553 *      velocity_rx_reset       -       handle a receive reset
 554 *      @vptr: velocity we are resetting
 555 *
 556 *      Reset the ownership and status for the receive ring side.
  557 *      Hand the entire receive queue back to the NIC.
 558 */
 559static void velocity_rx_reset(struct velocity_info *vptr)
 560{
 561
 562        struct mac_regs __iomem *regs = vptr->mac_regs;
 563        int i;
 564
 565        velocity_init_rx_ring_indexes(vptr);
 566
 567        /*
 568         *      Init state, all RD entries belong to the NIC
 569         */
 570        for (i = 0; i < vptr->options.numrx; ++i)
 571                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 572
 573        writew(vptr->options.numrx, &regs->RBRDU);
 574        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 575        writew(0, &regs->RDIdx);
 576        writew(vptr->options.numrx - 1, &regs->RDCSize);
 577}
 578
 579/**
 580 *      velocity_get_opt_media_mode     -       get media selection
 581 *      @vptr: velocity adapter
 582 *
 583 *      Get the media mode stored in EEPROM or module options and load
 584 *      mii_status accordingly. The requested link state information
 585 *      is also returned.
 586 */
 587static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
 588{
 589        u32 status = 0;
 590
 591        switch (vptr->options.spd_dpx) {
 592        case SPD_DPX_AUTO:
 593                status = VELOCITY_AUTONEG_ENABLE;
 594                break;
 595        case SPD_DPX_100_FULL:
 596                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
 597                break;
 598        case SPD_DPX_10_FULL:
 599                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
 600                break;
 601        case SPD_DPX_100_HALF:
 602                status = VELOCITY_SPEED_100;
 603                break;
 604        case SPD_DPX_10_HALF:
 605                status = VELOCITY_SPEED_10;
 606                break;
 607        case SPD_DPX_1000_FULL:
 608                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 609                break;
 610        }
 611        vptr->mii_status = status;
 612        return status;
 613}
 614
 615/**
 616 *      safe_disable_mii_autopoll       -       autopoll off
 617 *      @regs: velocity registers
 618 *
 619 *      Turn off the autopoll and wait for it to disable on the chip
 620 */
 621static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
 622{
 623        u16 ww;
 624
 625        /*  turn off MAUTO */
 626        writeb(0, &regs->MIICR);
 627        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 628                udelay(1);
 629                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 630                        break;
 631        }
 632}
 633
 634/**
 635 *      enable_mii_autopoll     -       turn on autopolling
 636 *      @regs: velocity registers
 637 *
 638 *      Enable the MII link status autopoll feature on the Velocity
 639 *      hardware. Wait for it to enable.
 640 */
 641static void enable_mii_autopoll(struct mac_regs __iomem *regs)
 642{
 643        int ii;
 644
 645        writeb(0, &(regs->MIICR));
 646        writeb(MIIADR_SWMPL, &regs->MIIADR);
 647
 648        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
 649                udelay(1);
 650                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 651                        break;
 652        }
 653
 654        writeb(MIICR_MAUTO, &regs->MIICR);
 655
 656        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
 657                udelay(1);
 658                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 659                        break;
 660        }
 661
 662}
 663
 664/**
 665 *      velocity_mii_read       -       read MII data
 666 *      @regs: velocity registers
 667 *      @index: MII register index
 668 *      @data: buffer for received data
 669 *
 670 *      Perform a single read of an MII 16bit register. Returns zero
 671 *      on success or -ETIMEDOUT if the PHY did not respond.
 672 */
 673static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
 674{
 675        u16 ww;
 676
 677        /*
 678         *      Disable MIICR_MAUTO, so that mii addr can be set normally
 679         */
 680        safe_disable_mii_autopoll(regs);
 681
 682        writeb(index, &regs->MIIADR);
 683
 684        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
 685
 686        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 687                if (!(readb(&regs->MIICR) & MIICR_RCMD))
 688                        break;
 689        }
 690
 691        *data = readw(&regs->MIIDATA);
 692
 693        enable_mii_autopoll(regs);
 694        if (ww == W_MAX_TIMEOUT)
 695                return -ETIMEDOUT;
 696        return 0;
 697}
 698
 699/**
 700 *      mii_check_media_mode    -       check media state
 701 *      @regs: velocity registers
 702 *
 703 *      Check the current MII status and determine the link status
 704 *      accordingly
 705 */
 706static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
 707{
 708        u32 status = 0;
 709        u16 ANAR;
 710
 711        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
 712                status |= VELOCITY_LINK_FAIL;
 713
 714        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
 715                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 716        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
 717                status |= (VELOCITY_SPEED_1000);
 718        else {
 719                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 720                if (ANAR & ADVERTISE_100FULL)
 721                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
 722                else if (ANAR & ADVERTISE_100HALF)
 723                        status |= VELOCITY_SPEED_100;
 724                else if (ANAR & ADVERTISE_10FULL)
 725                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
 726                else
 727                        status |= (VELOCITY_SPEED_10);
 728        }
 729
 730        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 731                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 732                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 733                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 734                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
 735                                status |= VELOCITY_AUTONEG_ENABLE;
 736                }
 737        }
 738
 739        return status;
 740}
 741
 742/**
 743 *      velocity_mii_write      -       write MII data
 744 *      @regs: velocity registers
  745 *      @mii_addr: MII register index
 746 *      @data: 16bit data for the MII register
 747 *
 748 *      Perform a single write to an MII 16bit register. Returns zero
 749 *      on success or -ETIMEDOUT if the PHY did not respond.
 750 */
 751static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
 752{
 753        u16 ww;
 754
 755        /*
 756         *      Disable MIICR_MAUTO, so that mii addr can be set normally
 757         */
 758        safe_disable_mii_autopoll(regs);
 759
 760        /* MII reg offset */
 761        writeb(mii_addr, &regs->MIIADR);
 762        /* set MII data */
 763        writew(data, &regs->MIIDATA);
 764
 765        /* turn on MIICR_WCMD */
 766        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
 767
 768        /* W_MAX_TIMEOUT is the timeout period */
 769        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 770                udelay(5);
 771                if (!(readb(&regs->MIICR) & MIICR_WCMD))
 772                        break;
 773        }
 774        enable_mii_autopoll(regs);
 775
 776        if (ww == W_MAX_TIMEOUT)
 777                return -ETIMEDOUT;
 778        return 0;
 779}
 780
 781/**
 782 *      set_mii_flow_control    -       flow control setup
 783 *      @vptr: velocity interface
 784 *
 785 *      Set up the flow control on this interface according to
 786 *      the supplied user/eeprom options.
 787 */
 788static void set_mii_flow_control(struct velocity_info *vptr)
 789{
 790        /*Enable or Disable PAUSE in ANAR */
 791        switch (vptr->options.flow_cntl) {
 792        case FLOW_CNTL_TX:
 793                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 794                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 795                break;
 796
 797        case FLOW_CNTL_RX:
 798                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 799                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 800                break;
 801
 802        case FLOW_CNTL_TX_RX:
 803                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 804                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 805                break;
 806
 807        case FLOW_CNTL_DISABLE:
 808                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 809                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 810                break;
 811        default:
 812                break;
 813        }
 814}
 815
 816/**
 817 *      mii_set_auto_on         -       autonegotiate on
 818 *      @vptr: velocity
 819 *
  820 *      Enable autonegotiation on this interface
 821 */
 822static void mii_set_auto_on(struct velocity_info *vptr)
 823{
 824        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
 825                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
 826        else
 827                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
 828}
 829
 830static u32 check_connection_type(struct mac_regs __iomem *regs)
 831{
 832        u32 status = 0;
 833        u8 PHYSR0;
 834        u16 ANAR;
 835        PHYSR0 = readb(&regs->PHYSR0);
 836
 837        /*
 838           if (!(PHYSR0 & PHYSR0_LINKGD))
 839           status|=VELOCITY_LINK_FAIL;
 840         */
 841
 842        if (PHYSR0 & PHYSR0_FDPX)
 843                status |= VELOCITY_DUPLEX_FULL;
 844
 845        if (PHYSR0 & PHYSR0_SPDG)
 846                status |= VELOCITY_SPEED_1000;
 847        else if (PHYSR0 & PHYSR0_SPD10)
 848                status |= VELOCITY_SPEED_10;
 849        else
 850                status |= VELOCITY_SPEED_100;
 851
 852        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 853                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 854                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 855                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 856                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
 857                                status |= VELOCITY_AUTONEG_ENABLE;
 858                }
 859        }
 860
 861        return status;
 862}
 863
 864/**
 865 *      velocity_set_media_mode         -       set media mode
 866 *      @mii_status: old MII link state
 867 *
  868 *      Check the media link state and configure the PHY flow control
  869 *      and velocity hardware setup accordingly. In particular
 870 *      we need to set up CD polling and frame bursting.
 871 */
 872static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
 873{
 874        u32 curr_status;
 875        struct mac_regs __iomem *regs = vptr->mac_regs;
 876
 877        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
 878        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
 879
 880        /* Set mii link status */
 881        set_mii_flow_control(vptr);
 882
 883        /*
 884           Check if new status is consistent with current status
 885           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
 886               (mii_status==curr_status)) {
 887           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
 888           vptr->mii_status=check_connection_type(vptr->mac_regs);
 889           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
 890           return 0;
 891           }
 892         */
 893
 894        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
 895                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
 896
 897        /*
 898         *      If connection type is AUTO
 899         */
 900        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
 901                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
 902                /* clear force MAC mode bit */
 903                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
 904                /* set duplex mode of MAC according to duplex mode of MII */
 905                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
 906                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
 907                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
 908
 909                /* enable AUTO-NEGO mode */
 910                mii_set_auto_on(vptr);
 911        } else {
 912                u16 CTRL1000;
 913                u16 ANAR;
 914                u8 CHIPGCR;
 915
 916                /*
  917                 * 1. if it's 3119, disable frame bursting in half duplex mode
  918                 *    and enable it in full duplex mode
 919                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
 920                 * 3. only enable CD heart beat counter in 10HD mode
 921                 */
 922
 923                /* set force MAC mode bit */
 924                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 925
 926                CHIPGCR = readb(&regs->CHIPGCR);
 927
 928                if (mii_status & VELOCITY_SPEED_1000)
 929                        CHIPGCR |= CHIPGCR_FCGMII;
 930                else
 931                        CHIPGCR &= ~CHIPGCR_FCGMII;
 932
 933                if (mii_status & VELOCITY_DUPLEX_FULL) {
 934                        CHIPGCR |= CHIPGCR_FCFDX;
 935                        writeb(CHIPGCR, &regs->CHIPGCR);
 936                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
 937                        if (vptr->rev_id < REV_ID_VT3216_A0)
 938                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
 939                } else {
 940                        CHIPGCR &= ~CHIPGCR_FCFDX;
 941                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
 942                        writeb(CHIPGCR, &regs->CHIPGCR);
 943                        if (vptr->rev_id < REV_ID_VT3216_A0)
 944                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
 945                }
 946
 947                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
 948                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 949                if ((mii_status & VELOCITY_SPEED_1000) &&
 950                    (mii_status & VELOCITY_DUPLEX_FULL)) {
 951                        CTRL1000 |= ADVERTISE_1000FULL;
 952                }
 953                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
 954
 955                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
 956                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
 957                else
 958                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
 959
 960                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
 961                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
 962                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
 963                if (mii_status & VELOCITY_SPEED_100) {
 964                        if (mii_status & VELOCITY_DUPLEX_FULL)
 965                                ANAR |= ADVERTISE_100FULL;
 966                        else
 967                                ANAR |= ADVERTISE_100HALF;
 968                } else if (mii_status & VELOCITY_SPEED_10) {
 969                        if (mii_status & VELOCITY_DUPLEX_FULL)
 970                                ANAR |= ADVERTISE_10FULL;
 971                        else
 972                                ANAR |= ADVERTISE_10HALF;
 973                }
 974                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
 975                /* enable AUTO-NEGO mode */
 976                mii_set_auto_on(vptr);
 977                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
 978        }
 979        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
 980        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
 981        return VELOCITY_LINK_CHANGE;
 982}
 983
 984/**
 985 *      velocity_print_link_status      -       link status reporting
 986 *      @vptr: velocity to report on
 987 *
 988 *      Turn the link status of the velocity card into a kernel log
 989 *      description of the new link state, detailing speed and duplex
 990 *      status
 991 */
 992static void velocity_print_link_status(struct velocity_info *vptr)
 993{
 994
 995        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
 996                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
 997        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
 998                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
 999
1000                if (vptr->mii_status & VELOCITY_SPEED_1000)
1001                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1002                else if (vptr->mii_status & VELOCITY_SPEED_100)
1003                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1004                else
1005                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1006
1007                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1008                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1009                else
1010                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1011        } else {
1012                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1013                switch (vptr->options.spd_dpx) {
1014                case SPD_DPX_1000_FULL:
1015                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
1016                        break;
1017                case SPD_DPX_100_HALF:
1018                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1019                        break;
1020                case SPD_DPX_100_FULL:
1021                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1022                        break;
1023                case SPD_DPX_10_HALF:
1024                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1025                        break;
1026                case SPD_DPX_10_FULL:
1027                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1028                        break;
1029                default:
1030                        break;
1031                }
1032        }
1033}
1034
1035/**
1036 *      enable_flow_control_ability     -       flow control
 1037 *      @vptr: velocity to configure
1038 *
1039 *      Set up flow control according to the flow control options
1040 *      determined by the eeprom/configuration.
1041 */
1042static void enable_flow_control_ability(struct velocity_info *vptr)
1043{
1044
1045        struct mac_regs __iomem *regs = vptr->mac_regs;
1046
1047        switch (vptr->options.flow_cntl) {
1048
1049        case FLOW_CNTL_DEFAULT:
1050                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1051                        writel(CR0_FDXRFCEN, &regs->CR0Set);
1052                else
1053                        writel(CR0_FDXRFCEN, &regs->CR0Clr);
1054
1055                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1056                        writel(CR0_FDXTFCEN, &regs->CR0Set);
1057                else
1058                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
1059                break;
1060
1061        case FLOW_CNTL_TX:
1062                writel(CR0_FDXTFCEN, &regs->CR0Set);
1063                writel(CR0_FDXRFCEN, &regs->CR0Clr);
1064                break;
1065
1066        case FLOW_CNTL_RX:
1067                writel(CR0_FDXRFCEN, &regs->CR0Set);
1068                writel(CR0_FDXTFCEN, &regs->CR0Clr);
1069                break;
1070
1071        case FLOW_CNTL_TX_RX:
1072                writel(CR0_FDXTFCEN, &regs->CR0Set);
1073                writel(CR0_FDXRFCEN, &regs->CR0Set);
1074                break;
1075
1076        case FLOW_CNTL_DISABLE:
1077                writel(CR0_FDXRFCEN, &regs->CR0Clr);
1078                writel(CR0_FDXTFCEN, &regs->CR0Clr);
1079                break;
1080
1081        default:
1082                break;
1083        }
1084
1085}
1086
1087/**
1088 *      velocity_soft_reset     -       soft reset
1089 *      @vptr: velocity to reset
1090 *
1091 *      Kick off a soft reset of the velocity adapter and then poll
1092 *      until the reset sequence has completed before returning.
1093 */
1094static int velocity_soft_reset(struct velocity_info *vptr)
1095{
1096        struct mac_regs __iomem *regs = vptr->mac_regs;
1097        int i = 0;
1098
1099        writel(CR0_SFRST, &regs->CR0Set);
1100
1101        for (i = 0; i < W_MAX_TIMEOUT; i++) {
1102                udelay(5);
1103                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1104                        break;
1105        }
1106
1107        if (i == W_MAX_TIMEOUT) {
1108                writel(CR0_FORSRST, &regs->CR0Set);
1109                /* FIXME: PCI POSTING */
1110                /* delay 2ms */
1111                mdelay(2);
1112        }
1113        return 0;
1114}
1115
1116/**
1117 *      velocity_set_multi      -       filter list change callback
1118 *      @dev: network device
1119 *
1120 *      Called by the network layer when the filter lists need to change
1121 *      for a velocity adapter. Reload the CAMs with the new address
1122 *      filter ruleset.
1123 */
1124static void velocity_set_multi(struct net_device *dev)
1125{
1126        struct velocity_info *vptr = netdev_priv(dev);
1127        struct mac_regs __iomem *regs = vptr->mac_regs;
1128        u8 rx_mode;
1129        int i;
1130        struct netdev_hw_addr *ha;
1131
1132        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1133                writel(0xffffffff, &regs->MARCAM[0]);
1134                writel(0xffffffff, &regs->MARCAM[4]);
1135                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1136        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1137                   (dev->flags & IFF_ALLMULTI)) {
1138                writel(0xffffffff, &regs->MARCAM[0]);
1139                writel(0xffffffff, &regs->MARCAM[4]);
1140                rx_mode = (RCR_AM | RCR_AB);
1141        } else {
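                /*
                 * Multicast entries are packed into the tail of the CAM:
                 * the first MCAM_SIZE - multicast_limit slots are left
                 * for other uses.
                 */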
1142                int offset = MCAM_SIZE - vptr->multicast_limit;
1143                mac_get_cam_mask(regs, vptr->mCAMmask);
1144
1145                i = 0;
1146                netdev_for_each_mc_addr(ha, dev) {
1147                        mac_set_cam(regs, i + offset, ha->addr);
1148                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1149                        i++;
1150                }
1151
1152                mac_set_cam_mask(regs, vptr->mCAMmask);
1153                rx_mode = RCR_AM | RCR_AB | RCR_AP;
1154        }
1155        if (dev->mtu > 1500)
1156                rx_mode |= RCR_AL;
1157
1158        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1159
1160}
1161
1162/*
 1163 * MII access and media link mode setting functions
1164 */
1165
1166/**
1167 *      mii_init        -       set up MII
1168 *      @vptr: velocity adapter
 1169 *      @mii_status: link status
1170 *
1171 *      Set up the PHY for the current link state.
1172 */
1173static void mii_init(struct velocity_info *vptr, u32 mii_status)
1174{
1175        u16 BMCR;
1176
1177        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1178        case PHYID_CICADA_CS8201:
1179                /*
1180                 *      Reset to hardware default
1181                 */
1182                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1183                /*
 1184                 *      Turn on the ECHODIS bit in NWay-forced full duplex
 1185                 *      mode and turn it off in NWay-forced half duplex mode,
 1186                 *      working around the NWay-forced vs. legacy-forced issue.
1187                 */
1188                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1189                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1190                else
1191                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1192                /*
1193                 *      Turn on Link/Activity LED enable bit for CIS8201
1194                 */
1195                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1196                break;
1197        case PHYID_VT3216_32BIT:
1198        case PHYID_VT3216_64BIT:
1199                /*
1200                 *      Reset to hardware default
1201                 */
1202                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1203                /*
 1204                 *      Turn on the ECHODIS bit in NWay-forced full duplex
 1205                 *      mode and turn it off in NWay-forced half duplex mode,
 1206                 *      working around the NWay-forced vs. legacy-forced issue
1207                 */
1208                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1209                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1210                else
1211                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1212                break;
1213
1214        case PHYID_MARVELL_1000:
1215        case PHYID_MARVELL_1000S:
1216                /*
1217                 *      Assert CRS on Transmit
1218                 */
1219                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1220                /*
1221                 *      Reset to hardware default
1222                 */
1223                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1224                break;
1225        default:
1226                ;
1227        }
1228        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1229        if (BMCR & BMCR_ISOLATE) {
1230                BMCR &= ~BMCR_ISOLATE;
1231                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1232        }
1233}
1234
1235/**
1236 * setup_queue_timers   -       Setup interrupt timers
 1237 * @vptr: velocity adapter
 1238 * Set up the interrupt timers used during suppression (a timeout
 1239 * fires if the frame count isn't filled).
1240 */
1241static void setup_queue_timers(struct velocity_info *vptr)
1242{
1243        /* Only for newer revisions */
1244        if (vptr->rev_id >= REV_ID_VT3216_A0) {
1245                u8 txqueue_timer = 0;
1246                u8 rxqueue_timer = 0;
1247
1248                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1249                                VELOCITY_SPEED_100)) {
1250                        txqueue_timer = vptr->options.txqueue_timer;
1251                        rxqueue_timer = vptr->options.rxqueue_timer;
1252                }
1253
1254                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1255                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1256        }
1257}
1258
1259/**
1260 * setup_adaptive_interrupts  -  Setup interrupt suppression
1261 *
 1262 * @vptr: velocity adapter
 1263 *
 1264 * The velocity is able to suppress interrupts under high interrupt load.
1265 * This function turns on that feature.
1266 */
1267static void setup_adaptive_interrupts(struct velocity_info *vptr)
1268{
1269        struct mac_regs __iomem *regs = vptr->mac_regs;
1270        u16 tx_intsup = vptr->options.tx_intsup;
1271        u16 rx_intsup = vptr->options.rx_intsup;
1272
1273        /* Setup default interrupt mask (will be changed below) */
1274        vptr->int_mask = INT_MASK_DEF;
1275
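        /*
         * ISRCTL is banked behind the CAMCR page-select bits: PS0
         * exposes the Tx suppression threshold, PS1 the Rx threshold,
         * and page 0 the interrupt hold timer.
         */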
1276        /* Set Tx Interrupt Suppression Threshold */
1277        writeb(CAMCR_PS0, &regs->CAMCR);
1278        if (tx_intsup != 0) {
1279                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1280                                ISR_PTX2I | ISR_PTX3I);
1281                writew(tx_intsup, &regs->ISRCTL);
1282        } else
1283                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1284
1285        /* Set Rx Interrupt Suppression Threshold */
1286        writeb(CAMCR_PS1, &regs->CAMCR);
1287        if (rx_intsup != 0) {
1288                vptr->int_mask &= ~ISR_PRXI;
1289                writew(rx_intsup, &regs->ISRCTL);
1290        } else
1291                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1292
1293        /* Select page to interrupt hold timer */
1294        writeb(0, &regs->CAMCR);
1295}
1296
1297/**
1298 *      velocity_init_registers -       initialise MAC registers
1299 *      @vptr: velocity to init
1300 *      @type: type of initialisation (hot or cold)
1301 *
1302 *      Initialise the MAC on a reset or on first set up on the
1303 *      hardware.
1304 */
1305static void velocity_init_registers(struct velocity_info *vptr,
1306                                    enum velocity_init_type type)
1307{
1308        struct mac_regs __iomem *regs = vptr->mac_regs;
1309        int i, mii_status;
1310
1311        mac_wol_reset(regs);
1312
1313        switch (type) {
1314        case VELOCITY_INIT_RESET:
1315        case VELOCITY_INIT_WOL:
1316
1317                netif_stop_queue(vptr->dev);
1318
1319                /*
 1320                 *      Reset RX so the RX pointer stays on a 4-descriptor (4X) boundary
1321                 */
1322                velocity_rx_reset(vptr);
1323                mac_rx_queue_run(regs);
1324                mac_rx_queue_wake(regs);
1325
1326                mii_status = velocity_get_opt_media_mode(vptr);
1327                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1328                        velocity_print_link_status(vptr);
1329                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1330                                netif_wake_queue(vptr->dev);
1331                }
1332
1333                enable_flow_control_ability(vptr);
1334
1335                mac_clear_isr(regs);
1336                writel(CR0_STOP, &regs->CR0Clr);
1337                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1338                                                        &regs->CR0Set);
1339
1340                break;
1341
1342        case VELOCITY_INIT_COLD:
1343        default:
1344                /*
1345                 *      Do reset
1346                 */
1347                velocity_soft_reset(vptr);
1348                mdelay(5);
1349
1350                mac_eeprom_reload(regs);
1351                for (i = 0; i < 6; i++)
1352                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
1353
1354                /*
1355                 *      clear Pre_ACPI bit.
1356                 */
1357                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1358                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1359                mac_set_dma_length(regs, vptr->options.DMA_length);
1360
1361                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1362                /*
1363                 *      Back off algorithm use original IEEE standard
1364                 */
1365                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1366
1367                /*
1368                 *      Init CAM filter
1369                 */
1370                velocity_init_cam_filter(vptr);
1371
1372                /*
1373                 *      Set packet filter: Receive directed and broadcast address
1374                 */
1375                velocity_set_multi(vptr->dev);
1376
1377                /*
1378                 *      Enable MII auto-polling
1379                 */
1380                enable_mii_autopoll(regs);
1381
1382                setup_adaptive_interrupts(vptr);
1383
1384                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1385                writew(vptr->options.numrx - 1, &regs->RDCSize);
1386                mac_rx_queue_run(regs);
1387                mac_rx_queue_wake(regs);
1388
1389                writew(vptr->options.numtx - 1, &regs->TDCSize);
1390
1391                for (i = 0; i < vptr->tx.numq; i++) {
1392                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1393                        mac_tx_queue_run(regs, i);
1394                }
1395
1396                init_flow_control_register(vptr);
1397
1398                writel(CR0_STOP, &regs->CR0Clr);
1399                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1400
1401                mii_status = velocity_get_opt_media_mode(vptr);
1402                netif_stop_queue(vptr->dev);
1403
1404                mii_init(vptr, mii_status);
1405
1406                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1407                        velocity_print_link_status(vptr);
1408                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1409                                netif_wake_queue(vptr->dev);
1410                }
1411
1412                enable_flow_control_ability(vptr);
1413                mac_hw_mibs_init(regs);
1414                mac_write_int_mask(vptr->int_mask, regs);
1415                mac_clear_isr(regs);
1416
1417        }
1418}
1419
1420static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1421{
1422        struct mac_regs __iomem *regs = vptr->mac_regs;
1423        int avail, dirty, unusable;
1424
1425        /*
 1426         * The RD count handed back must be a multiple of 4 (4X) per
 1427         * the hardware spec (programming guide rev 1.20, p.13)
1428         */
1429        if (vptr->rx.filled < 4)
1430                return;
1431
1432        wmb();
1433
1434        unusable = vptr->rx.filled & 0x0003;
1435        dirty = vptr->rx.dirty - unusable;
1436        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1437                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1438                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1439        }
1440
1441        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1442        vptr->rx.filled = unusable;
1443}
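/*
 * Worked example of the 4X rule: with rx.filled == 10, the low two bits
 * leave 2 descriptors unusable for now; ownership of the other 8 is
 * flipped to the NIC, RBRDU is credited with 8, and rx.filled is reset
 * to the 2 leftovers.
 */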
1444
1445/**
1446 *      velocity_init_dma_rings -       set up DMA rings
1447 *      @vptr: Velocity to set up
1448 *
1449 *      Allocate PCI mapped DMA rings for the receive and transmit layer
1450 *      to use.
1451 */
1452static int velocity_init_dma_rings(struct velocity_info *vptr)
1453{
1454        struct velocity_opt *opt = &vptr->options;
1455        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1456        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1457        struct pci_dev *pdev = vptr->pdev;
1458        dma_addr_t pool_dma;
1459        void *pool;
1460        unsigned int i;
1461
1462        /*
 1463         * Allocate all RD/TD rings from a single pool.
 1464         *
 1465         * pci_alloc_consistent() fulfills the requirement for 64 byte
 1466         * alignment.
1467         */
1468        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1469                                    rx_ring_size, &pool_dma);
1470        if (!pool) {
1471                dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
1472                        vptr->dev->name);
1473                return -ENOMEM;
1474        }
1475
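        /*
         * Carve the pool up: the RX ring sits at the start, followed by
         * one TX ring per transmit queue.
         */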
1476        vptr->rx.ring = pool;
1477        vptr->rx.pool_dma = pool_dma;
1478
1479        pool += rx_ring_size;
1480        pool_dma += rx_ring_size;
1481
1482        for (i = 0; i < vptr->tx.numq; i++) {
1483                vptr->tx.rings[i] = pool;
1484                vptr->tx.pool_dma[i] = pool_dma;
1485                pool += tx_ring_size;
1486                pool_dma += tx_ring_size;
1487        }
1488
1489        return 0;
1490}
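
/*
 * Layout of the single coherent allocation above, for reference:
 *
 *      pool_dma                                -> RX ring  (numrx * sizeof(struct rx_desc))
 *      pool_dma + rx_ring_size                 -> TX ring 0 (numtx * sizeof(struct tx_desc))
 *      pool_dma + rx_ring_size + tx_ring_size  -> TX ring 1
 *      ...                                        (tx.numq rings in total)
 *
 * One allocation means one anchor: velocity_free_dma_rings() below can
 * release the whole area through vptr->rx.ring and vptr->rx.pool_dma alone.
 */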
1491
1492static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1493{
1494        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1495}
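
/*
 * By way of example: the standard 1500-byte MTU selects the fixed
 * PKT_BUF_SZ receive buffer, while a jumbo MTU of 4000 would yield
 * 4032-byte buffers (the MTU plus 32 bytes of slack).
 */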
1496
1497/**
1498 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1499 *      @vptr: velocity
1500 *      @idx: ring index
1501 *
1502 *      Allocate a new full-sized buffer for the reception of a frame and
1503 *      map it into PCI space for the hardware to use. The hardware
1504 *      requires 64-byte alignment of the buffer, which makes life
1505 *      less fun than would be ideal.
1506 */
1507static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1508{
1509        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1510        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1511
1512        rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
1513        if (rd_info->skb == NULL)
1514                return -ENOMEM;
1515
1516        /*
1517         *      Do the gymnastics to get the buffer head for data at
1518         *      64-byte alignment.
1519         */
1520        skb_reserve(rd_info->skb,
1521                        64 - ((unsigned long) rd_info->skb->data & 63));
1522        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1523                                        vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1524
1525        /*
1526         *      Fill in the descriptor to match
1527         */
1528
1529        *((u32 *)&(rd->rdesc0)) = 0;
1530        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1531        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1532        rd->pa_high = 0;
1533        return 0;
1534}
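
/*
 * Worked example of the alignment fix-up above: if netdev_alloc_skb()
 * hands back data at ...0x1007, then 64 - (0x1007 & 63) = 57 bytes are
 * reserved and the DMA buffer starts at ...0x1040. An already aligned
 * buffer (low six bits zero) is pushed forward by a full 64 bytes
 * rather than left in place, which the extra 64 bytes of allocation
 * absorb.
 */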
1535
1536
1537static int velocity_rx_refill(struct velocity_info *vptr)
1538{
1539        int dirty = vptr->rx.dirty, done = 0;
1540
1541        do {
1542                struct rx_desc *rd = vptr->rx.ring + dirty;
1543
1544                /* Fine for an all zero Rx desc at init time as well */
1545                if (rd->rdesc0.len & OWNED_BY_NIC)
1546                        break;
1547
1548                if (!vptr->rx.info[dirty].skb) {
1549                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1550                                break;
1551                }
1552                done++;
1553                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1554        } while (dirty != vptr->rx.curr);
1555
1556        if (done) {
1557                vptr->rx.dirty = dirty;
1558                vptr->rx.filled += done;
1559        }
1560
1561        return done;
1562}
1563
1564/**
1565 *      velocity_free_rd_ring   -       free receive ring
1566 *      @vptr: velocity to clean up
1567 *
1568 *      Free the receive buffers for each ring slot and any
1569 *      attached socket buffers that need to go away.
1570 */
1571static void velocity_free_rd_ring(struct velocity_info *vptr)
1572{
1573        int i;
1574
1575        if (vptr->rx.info == NULL)
1576                return;
1577
1578        for (i = 0; i < vptr->options.numrx; i++) {
1579                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1580                struct rx_desc *rd = vptr->rx.ring + i;
1581
1582                memset(rd, 0, sizeof(*rd));
1583
1584                if (!rd_info->skb)
1585                        continue;
1586                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1587                                 PCI_DMA_FROMDEVICE);
1588                rd_info->skb_dma = 0;
1589
1590                dev_kfree_skb(rd_info->skb);
1591                rd_info->skb = NULL;
1592        }
1593
1594        kfree(vptr->rx.info);
1595        vptr->rx.info = NULL;
1596}
1597
1598/**
1599 *      velocity_init_rd_ring   -       set up receive ring
1600 *      @vptr: velocity to configure
1601 *
1602 *      Allocate and set up the receive buffers for each ring slot and
1603 *      assign them to the network adapter.
1604 */
1605static int velocity_init_rd_ring(struct velocity_info *vptr)
1606{
1607        int ret = -ENOMEM;
1608
1609        vptr->rx.info = kcalloc(vptr->options.numrx,
1610                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1611        if (!vptr->rx.info)
1612                goto out;
1613
1614        velocity_init_rx_ring_indexes(vptr);
1615
1616        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1617                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1618                        "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1619                velocity_free_rd_ring(vptr);
1620                goto out;
1621        }
1622
1623        ret = 0;
1624out:
1625        return ret;
1626}
1627
1628/**
1629 *      velocity_init_td_ring   -       set up transmit ring
1630 *      @vptr:  velocity
1631 *
1632 *      Set up the transmit ring and chain the ring pointers together.
1633 *      Returns zero on success or a negative posix errno code for
1634 *      failure.
1635 */
1636static int velocity_init_td_ring(struct velocity_info *vptr)
1637{
1638        int j;
1639
1640        /* Init the TD ring entries */
1641        for (j = 0; j < vptr->tx.numq; j++) {
1642
1643                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1644                                            sizeof(struct velocity_td_info),
1645                                            GFP_KERNEL);
1646                if (!vptr->tx.infos[j]) {
1647                        while (--j >= 0)
1648                                kfree(vptr->tx.infos[j]);
1649                        return -ENOMEM;
1650                }
1651
1652                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1653        }
1654        return 0;
1655}
1656
1657/**
1658 *      velocity_free_dma_rings -       free PCI ring pointers
1659 *      @vptr: Velocity to free from
1660 *
1661 *      Clean up the PCI ring buffers allocated to this velocity.
1662 */
1663static void velocity_free_dma_rings(struct velocity_info *vptr)
1664{
1665        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1666                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1667
1668        pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1669}
1670
1671static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1672{
1673        int ret;
1674
1675        velocity_set_rxbufsize(vptr, mtu);
1676
1677        ret = velocity_init_dma_rings(vptr);
1678        if (ret < 0)
1679                goto out;
1680
1681        ret = velocity_init_rd_ring(vptr);
1682        if (ret < 0)
1683                goto err_free_dma_rings_0;
1684
1685        ret = velocity_init_td_ring(vptr);
1686        if (ret < 0)
1687                goto err_free_rd_ring_1;
1688out:
1689        return ret;
1690
1691err_free_rd_ring_1:
1692        velocity_free_rd_ring(vptr);
1693err_free_dma_rings_0:
1694        velocity_free_dma_rings(vptr);
1695        goto out;
1696}
1697
1698/**
1699 *      velocity_free_tx_buf    -       free transmit buffer
1700 *      @vptr: velocity
1701 *      @tdinfo: buffer information
1702 *      @td: transmit descriptor
1703 *
1704 *      Release a transmit buffer: recycle it if it was preallocated, otherwise unmap it.
1705 */
1706static void velocity_free_tx_buf(struct velocity_info *vptr,
1707                struct velocity_td_info *tdinfo, struct tx_desc *td)
1708{
1709        struct sk_buff *skb = tdinfo->skb;
1710
1711        /*
1712         *      Don't unmap the pre-allocated tx_bufs
1713         */
1714        if (tdinfo->skb_dma) {
1715                int i;
1716
1717                for (i = 0; i < tdinfo->nskb_dma; i++) {
1718                        size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1719
1720                        /* For scatter-gather */
1721                        if (skb_shinfo(skb)->nr_frags > 0)
1722                                pktlen = max_t(size_t, pktlen,
1723                                                le16_to_cpu(td->td_buf[i].size & ~TD_QUEUE));
1724
1725                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1726                                        pktlen, PCI_DMA_TODEVICE);
1727                }
1728        }
1729        dev_kfree_skb_irq(skb);
1730        tdinfo->skb = NULL;
1731}
1732
1733/*
1734 *      FIXME: could we merge this with velocity_free_tx_buf ?
1735 */
1736static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1737                                                         int q, int n)
1738{
1739        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1740        int i;
1741
1742        if (td_info == NULL)
1743                return;
1744
1745        if (td_info->skb) {
1746                for (i = 0; i < td_info->nskb_dma; i++) {
1747                        if (td_info->skb_dma[i]) {
1748                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1749                                        td_info->skb->len, PCI_DMA_TODEVICE);
1750                                td_info->skb_dma[i] = 0;
1751                        }
1752                }
1753                dev_kfree_skb(td_info->skb);
1754                td_info->skb = NULL;
1755        }
1756}
1757
1758/**
1759 *      velocity_free_td_ring   -       free td ring
1760 *      @vptr: velocity
1761 *
1762 *      Free up the transmit ring for this particular velocity adapter.
1763 *      We free the ring contents but not the ring itself.
1764 */
1765static void velocity_free_td_ring(struct velocity_info *vptr)
1766{
1767        int i, j;
1768
1769        for (j = 0; j < vptr->tx.numq; j++) {
1770                if (vptr->tx.infos[j] == NULL)
1771                        continue;
1772                for (i = 0; i < vptr->options.numtx; i++)
1773                        velocity_free_td_ring_entry(vptr, j, i);
1774
1775                kfree(vptr->tx.infos[j]);
1776                vptr->tx.infos[j] = NULL;
1777        }
1778}
1779
1780static void velocity_free_rings(struct velocity_info *vptr)
1781{
1782        velocity_free_td_ring(vptr);
1783        velocity_free_rd_ring(vptr);
1784        velocity_free_dma_rings(vptr);
1785}
1786
1787/**
1788 *      velocity_error  -       handle error from controller
1789 *      @vptr: velocity
1790 *      @status: card status
1791 *
1792 *      Process an error report from the hardware and attempt to recover
1793 *      the card itself. At the moment we cannot recover from some
1794 *      theoretically impossible errors but this could be fixed using
1795 *      the pci_device_failed logic to bounce the hardware
1796 *
1797 */
1798static void velocity_error(struct velocity_info *vptr, int status)
1799{
1800
1801        if (status & ISR_TXSTLI) {
1802                struct mac_regs __iomem *regs = vptr->mac_regs;
1803
1804                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1805                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1806                writew(TRDCSR_RUN, &regs->TDCSRClr);
1807                netif_stop_queue(vptr->dev);
1808
1809                /* FIXME: port over the pci_device_failed code and use it
1810                   here */
1811        }
1812
1813        if (status & ISR_SRCI) {
1814                struct mac_regs __iomem *regs = vptr->mac_regs;
1815                int linked;
1816
1817                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1818                        vptr->mii_status = check_connection_type(regs);
1819
1820                        /*
1821                         *      If it is a 3119, disable frame bursting in
1822                         *      half-duplex mode and enable it in full-duplex
1823                         *      mode.
1824                         */
1825                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1826                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1827                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1828                                else
1829                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1830                        }
1831                        /*
1832                         *      Only enable the CD heartbeat counter in 10HD mode
1833                         */
1834                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1835                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1836                        else
1837                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1838
1839                        setup_queue_timers(vptr);
1840                }
1841                /*
1842                 *      Get link status from PHYSR0
1843                 */
1844                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1845
1846                if (linked) {
1847                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1848                        netif_carrier_on(vptr->dev);
1849                } else {
1850                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1851                        netif_carrier_off(vptr->dev);
1852                }
1853
1854                velocity_print_link_status(vptr);
1855                enable_flow_control_ability(vptr);
1856
1857                /*
1858                 *      Re-enable auto-polling because SRCI will disable
1859                 *      auto-polling
1860                 */
1861
1862                enable_mii_autopoll(regs);
1863
1864                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1865                        netif_stop_queue(vptr->dev);
1866                else
1867                        netif_wake_queue(vptr->dev);
1868
1869        }
1870        if (status & ISR_MIBFI)
1871                velocity_update_hw_mibs(vptr);
1872        if (status & ISR_LSTEI)
1873                mac_rx_queue_wake(vptr->mac_regs);
1874}
1875
1876/**
1877 *      velocity_tx_srv         -       transmit interrupt service
1878 *      @vptr: Velocity
1879 *
1880 *      Scan the queues looking for transmitted packets that
1881 *      we can complete and clean up. Update any statistics as
1882 *      necessary.
1883 */
1884static int velocity_tx_srv(struct velocity_info *vptr)
1885{
1886        struct tx_desc *td;
1887        int qnum;
1888        int full = 0;
1889        int idx;
1890        int works = 0;
1891        struct velocity_td_info *tdinfo;
1892        struct net_device_stats *stats = &vptr->dev->stats;
1893
1894        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1895                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1896                        idx = (idx + 1) % vptr->options.numtx) {
1897
1898                        /*
1899                         *      Get Tx Descriptor
1900                         */
1901                        td = &(vptr->tx.rings[qnum][idx]);
1902                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1903
1904                        if (td->tdesc0.len & OWNED_BY_NIC)
1905                                break;
1906
1907                        if (works++ > 15)
1908                                break;
1909
1910                        if (td->tdesc0.TSR & TSR0_TERR) {
1911                                stats->tx_errors++;
1912                                stats->tx_dropped++;
1913                                if (td->tdesc0.TSR & TSR0_CDH)
1914                                        stats->tx_heartbeat_errors++;
1915                                if (td->tdesc0.TSR & TSR0_CRS)
1916                                        stats->tx_carrier_errors++;
1917                                if (td->tdesc0.TSR & TSR0_ABT)
1918                                        stats->tx_aborted_errors++;
1919                                if (td->tdesc0.TSR & TSR0_OWC)
1920                                        stats->tx_window_errors++;
1921                        } else {
1922                                stats->tx_packets++;
1923                                stats->tx_bytes += tdinfo->skb->len;
1924                        }
1925                        velocity_free_tx_buf(vptr, tdinfo, td);
1926                        vptr->tx.used[qnum]--;
1927                }
1928                vptr->tx.tail[qnum] = idx;
1929
1930                if (AVAIL_TD(vptr, qnum) < 1)
1931                        full = 1;
1932        }
1933        /*
1934         *      Look to see if we should kick the transmit network
1935         *      layer for more work.
1936         */
1937        if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1938            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1939                netif_wake_queue(vptr->dev);
1940        }
1941        return works;
1942}
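
/*
 * Sketch of the per-queue accounting used above, assuming AVAIL_TD()
 * is the usual numtx - used computation from via-velocity.h:
 *
 *      tail -> oldest descriptor possibly still owned by the NIC
 *      curr -> next slot velocity_xmit() will fill
 *      used -> descriptors in flight between the two
 *
 * Each reclaimed descriptor advances tail and drops used; the netif
 * queue is only woken once a slot is free and the link is up.
 */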
1943
1944/**
1945 *      velocity_rx_csum        -       checksum process
1946 *      @rd: receive packet descriptor
1947 *      @skb: network layer packet buffer
1948 *
1949 *      Process the status bits for the received packet and determine
1950 *      if the checksum was computed and verified by the hardware
1951 */
1952static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1953{
1954        skb_checksum_none_assert(skb);
1955
1956        if (rd->rdesc1.CSM & CSM_IPKT) {
1957                if (rd->rdesc1.CSM & CSM_IPOK) {
1958                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1959                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1960                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1961                                        return;
1962                        }
1963                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1964                }
1965        }
1966}
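
/*
 * The CSM decisions above, tabulated for reference (x = don't care):
 *
 *      IPKT  IPOK  TCPKT/UDPKT  TUPOK    resulting ip_summed
 *       0     x        x          x      CHECKSUM_NONE
 *       1     0        x          x      CHECKSUM_NONE
 *       1     1        0          x      CHECKSUM_UNNECESSARY (IP only)
 *       1     1        1          0      CHECKSUM_NONE
 *       1     1        1          1      CHECKSUM_UNNECESSARY
 */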
1967
1968/**
1969 *      velocity_rx_copy        -       in-place Rx copy for small packets
1970 *      @rx_skb: network layer packet buffer candidate
1971 *      @pkt_size: received data size
1972 *      @vptr: velocity we are handling
1974 *
1975 *      Replace the current skb that is scheduled for Rx processing by a
1976 *      shorter, immediately allocated skb, if the received packet is small
1977 *      enough. This function returns a negative value if the received
1978 *      packet is too big or if memory is exhausted.
1979 */
1980static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1981                            struct velocity_info *vptr)
1982{
1983        int ret = -1;
1984        if (pkt_size < rx_copybreak) {
1985                struct sk_buff *new_skb;
1986
1987                new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1988                if (new_skb) {
1989                        new_skb->ip_summed = rx_skb[0]->ip_summed;
1990                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1991                        *rx_skb = new_skb;
1992                        ret = 0;
1993                }
1994
1995        }
1996        return ret;
1997}
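
/*
 * velocity_receive_frame() below keys off this return value: 0 means
 * the data now lives in a fresh skb, so the original receive buffer
 * stays mapped and only needs syncing back to the device; a negative
 * return means the original skb itself goes up the stack and must be
 * unmapped and replaced on the next refill. rx_copybreak is the size
 * threshold (set elsewhere in this driver).
 */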
1998
1999/**
2000 *      velocity_iph_realign    -       IP header alignment
2001 *      @vptr: velocity we are handling
2002 *      @skb: network layer packet buffer
2003 *      @pkt_size: received data size
2004 *
2005 *      Shift the frame up by two bytes so that the IP header becomes
2006 *      32-bit aligned. This behavior can be configured by the user.
2007 */
2008static inline void velocity_iph_realign(struct velocity_info *vptr,
2009                                        struct sk_buff *skb, int pkt_size)
2010{
2011        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2012                memmove(skb->data + 2, skb->data, pkt_size);
2013                skb_reserve(skb, 2);
2014        }
2015}
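
/*
 * Worked example: with the receive buffer 64-byte aligned, the 14-byte
 * Ethernet header leaves the IP header at offset 14, i.e. on a mere
 * 2-byte boundary. Shifting the frame up by two bytes as above moves
 * it to offset 16, 32-bit aligned, at the price of a memmove() over
 * the whole packet.
 */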
2016
2017/**
2018 *      velocity_receive_frame  -       received packet processor
2019 *      @vptr: velocity we are handling
2020 *      @idx: ring index
2021 *
2022 *      A packet has arrived. We process the packet and, if appropriate,
2023 *      pass the frame up the network stack.
2024 */
2025static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2026{
2027        void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2028        struct net_device_stats *stats = &vptr->dev->stats;
2029        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2030        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2031        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2032        struct sk_buff *skb;
2033
2034        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2035                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2036                stats->rx_length_errors++;
2037                return -EINVAL;
2038        }
2039
2040        if (rd->rdesc0.RSR & RSR_MAR)
2041                stats->multicast++;
2042
2043        skb = rd_info->skb;
2044
2045        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2046                                    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2047
2048        /*
2049         *      Drop frames not meeting IEEE 802.3
2050         */
2051
2052        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2053                if (rd->rdesc0.RSR & RSR_RL) {
2054                        stats->rx_length_errors++;
2055                        return -EINVAL;
2056                }
2057        }
2058
2059        pci_action = pci_dma_sync_single_for_device;
2060
2061        velocity_rx_csum(rd, skb);
2062
2063        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2064                velocity_iph_realign(vptr, skb, pkt_len);
2065                pci_action = pci_unmap_single;
2066                rd_info->skb = NULL;
2067        }
2068
2069        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2070                   PCI_DMA_FROMDEVICE);
2071
2072        skb_put(skb, pkt_len - 4);
2073        skb->protocol = eth_type_trans(skb, vptr->dev);
2074
2075        if (rd->rdesc0.RSR & RSR_DETAG) {
2076                u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2077
2078                __vlan_hwaccel_put_tag(skb, vid);
2079        }
2080        netif_rx(skb);
2081
2082        stats->rx_bytes += pkt_len;
2083        stats->rx_packets++;
2084
2085        return 0;
2086}
2087
2088/**
2089 *      velocity_rx_srv         -       service RX interrupt
2090 *      @vptr: velocity
2091 *
2092 *      Walk the receive ring of the velocity adapter and remove
2093 *      any received packets from the receive queue. Hand the ring
2094 *      slots back to the adapter for reuse.
2095 */
2096static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2097{
2098        struct net_device_stats *stats = &vptr->dev->stats;
2099        int rd_curr = vptr->rx.curr;
2100        int works = 0;
2101
2102        while (works < budget_left) {
2103                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2104
2105                if (!vptr->rx.info[rd_curr].skb)
2106                        break;
2107
2108                if (rd->rdesc0.len & OWNED_BY_NIC)
2109                        break;
2110
2111                rmb();
2112
2113                /*
2114                 *      Don't drop CE or RL error frames even though RXOK is off
2115                 */
2116                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2117                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2118                                stats->rx_dropped++;
2119                } else {
2120                        if (rd->rdesc0.RSR & RSR_CRC)
2121                                stats->rx_crc_errors++;
2122                        if (rd->rdesc0.RSR & RSR_FAE)
2123                                stats->rx_frame_errors++;
2124
2125                        stats->rx_dropped++;
2126                }
2127
2128                rd->size |= RX_INTEN;
2129
2130                rd_curr++;
2131                if (rd_curr >= vptr->options.numrx)
2132                        rd_curr = 0;
2133                works++;
2134        }
2135
2136        vptr->rx.curr = rd_curr;
2137
2138        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2139                velocity_give_many_rx_descs(vptr);
2140
2141        VAR_USED(stats);
2142        return works;
2143}
2144
2145static int velocity_poll(struct napi_struct *napi, int budget)
2146{
2147        struct velocity_info *vptr = container_of(napi,
2148                        struct velocity_info, napi);
2149        unsigned int rx_done;
2150        unsigned long flags;
2151
2152        spin_lock_irqsave(&vptr->lock, flags);
2153        /*
2154         * Do rx and tx twice for performance (taken from the VIA
2155         * out-of-tree driver).
2156         */
2157        rx_done = velocity_rx_srv(vptr, budget / 2);
2158        velocity_tx_srv(vptr);
2159        rx_done += velocity_rx_srv(vptr, budget - rx_done);
2160        velocity_tx_srv(vptr);
2161
2162        /* If budget not fully consumed, exit the polling mode */
2163        if (rx_done < budget) {
2164                napi_complete(napi);
2165                mac_enable_int(vptr->mac_regs);
2166        }
2167        spin_unlock_irqrestore(&vptr->lock, flags);
2168
2169        return rx_done;
2170}
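
/*
 * Budget arithmetic for the double pass above, assuming the usual NAPI
 * budget of 64: the first velocity_rx_srv() call may take up to 32
 * packets, the second whatever is left (budget - rx_done), so the two
 * passes can never overrun the budget, and napi_complete() only runs
 * when the ring went empty before the budget did.
 */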
2171
2172/**
2173 *      velocity_intr           -       interrupt callback
2174 *      @irq: interrupt number
2175 *      @dev_instance: interrupting device
2176 *
2177 *      Called whenever an interrupt is generated by the velocity
2178 *      adapter IRQ line. As the line may be shared, we must first check
2179 *      whether the interrupt really is ours and, if not, exit as
2180 *      efficiently as possible.
2181 */
2182static irqreturn_t velocity_intr(int irq, void *dev_instance)
2183{
2184        struct net_device *dev = dev_instance;
2185        struct velocity_info *vptr = netdev_priv(dev);
2186        u32 isr_status;
2187
2188        spin_lock(&vptr->lock);
2189        isr_status = mac_read_isr(vptr->mac_regs);
2190
2191        /* Not us ? */
2192        if (isr_status == 0) {
2193                spin_unlock(&vptr->lock);
2194                return IRQ_NONE;
2195        }
2196
2197        /* Ack the interrupt */
2198        mac_write_isr(vptr->mac_regs, isr_status);
2199
2200        if (likely(napi_schedule_prep(&vptr->napi))) {
2201                mac_disable_int(vptr->mac_regs);
2202                __napi_schedule(&vptr->napi);
2203        }
2204
2205        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2206                velocity_error(vptr, isr_status);
2207
2208        spin_unlock(&vptr->lock);
2209
2210        return IRQ_HANDLED;
2211}
2212
2213/**
2214 *      velocity_open           -       interface activation callback
2215 *      @dev: network layer device to open
2216 *
2217 *      Called when the network layer brings the interface up. Returns
2218 *      a negative posix error code on failure, or zero on success.
2219 *
2220 *      All the ring allocation and set up is done on open for this
2221 *      adapter to minimise memory usage when inactive
2222 */
2223static int velocity_open(struct net_device *dev)
2224{
2225        struct velocity_info *vptr = netdev_priv(dev);
2226        int ret;
2227
2228        ret = velocity_init_rings(vptr, dev->mtu);
2229        if (ret < 0)
2230                goto out;
2231
2232        /* Ensure chip is running */
2233        pci_set_power_state(vptr->pdev, PCI_D0);
2234
2235        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2236
2237        ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2238                          dev->name, dev);
2239        if (ret < 0) {
2240                /* Power down the chip */
2241                pci_set_power_state(vptr->pdev, PCI_D3hot);
2242                velocity_free_rings(vptr);
2243                goto out;
2244        }
2245
2246        velocity_give_many_rx_descs(vptr);
2247
2248        mac_enable_int(vptr->mac_regs);
2249        netif_start_queue(dev);
2250        napi_enable(&vptr->napi);
2251        vptr->flags |= VELOCITY_FLAGS_OPENED;
2252out:
2253        return ret;
2254}
2255
2256/**
2257 *      velocity_shutdown       -       shut down the chip
2258 *      @vptr: velocity to deactivate
2259 *
2260 *      Shuts down the internal operations of the velocity and
2261 *      disables interrupts, autopolling, transmit and receive
2262 */
2263static void velocity_shutdown(struct velocity_info *vptr)
2264{
2265        struct mac_regs __iomem *regs = vptr->mac_regs;
2266        mac_disable_int(regs);
2267        writel(CR0_STOP, &regs->CR0Set);
2268        writew(0xFFFF, &regs->TDCSRClr);
2269        writeb(0xFF, &regs->RDCSRClr);
2270        safe_disable_mii_autopoll(regs);
2271        mac_clear_isr(regs);
2272}
2273
2274/**
2275 *      velocity_change_mtu     -       MTU change callback
2276 *      @dev: network device
2277 *      @new_mtu: desired MTU
2278 *
2279 *      Handle requests from the networking layer for MTU change on
2280 *      this interface. It gets called on a change by the network layer.
2281 *      Return zero for success or negative posix error code.
2282 */
2283static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2284{
2285        struct velocity_info *vptr = netdev_priv(dev);
2286        int ret = 0;
2287
2288        if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2289                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2290                                vptr->dev->name);
2291                ret = -EINVAL;
2292                goto out_0;
2293        }
2294
2295        if (!netif_running(dev)) {
2296                dev->mtu = new_mtu;
2297                goto out_0;
2298        }
2299
2300        if (dev->mtu != new_mtu) {
2301                struct velocity_info *tmp_vptr;
2302                unsigned long flags;
2303                struct rx_info rx;
2304                struct tx_info tx;
2305
2306                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2307                if (!tmp_vptr) {
2308                        ret = -ENOMEM;
2309                        goto out_0;
2310                }
2311
2312                tmp_vptr->dev = dev;
2313                tmp_vptr->pdev = vptr->pdev;
2314                tmp_vptr->options = vptr->options;
2315                tmp_vptr->tx.numq = vptr->tx.numq;
2316
2317                ret = velocity_init_rings(tmp_vptr, new_mtu);
2318                if (ret < 0)
2319                        goto out_free_tmp_vptr_1;
2320
2321                spin_lock_irqsave(&vptr->lock, flags);
2322
2323                netif_stop_queue(dev);
2324                velocity_shutdown(vptr);
2325
2326                rx = vptr->rx;
2327                tx = vptr->tx;
2328
2329                vptr->rx = tmp_vptr->rx;
2330                vptr->tx = tmp_vptr->tx;
2331
2332                tmp_vptr->rx = rx;
2333                tmp_vptr->tx = tx;
2334
2335                dev->mtu = new_mtu;
2336
2337                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2338
2339                velocity_give_many_rx_descs(vptr);
2340
2341                mac_enable_int(vptr->mac_regs);
2342                netif_start_queue(dev);
2343
2344                spin_unlock_irqrestore(&vptr->lock, flags);
2345
2346                velocity_free_rings(tmp_vptr);
2347
2348out_free_tmp_vptr_1:
2349                kfree(tmp_vptr);
2350        }
2351out_0:
2352        return ret;
2353}
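
/*
 * In outline, the ring swap performed above (a sketch of the code, not
 * extra logic):
 *
 *      build new rings for new_mtu in tmp_vptr;   - may sleep, lock not held
 *      spin_lock_irqsave(&vptr->lock, flags);
 *      stop the queue and shut the chip down;
 *      swap vptr->rx/tx with tmp_vptr->rx/tx;     - pointer copies only
 *      reprogram the chip and restart the queue;
 *      spin_unlock_irqrestore(&vptr->lock, flags);
 *      free the old rings now parked in tmp_vptr;
 *
 * If the new rings cannot be allocated, the device simply keeps running
 * on the old ones.
 */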
2354
2355/**
2356 *      velocity_mii_ioctl              -       MII ioctl handler
2357 *      @dev: network device
2358 *      @ifr: the ifreq block for the ioctl
2359 *      @cmd: the command
2360 *
2361 *      Process MII requests made via ioctl from the network layer. These
2362 *      are used by tools like kudzu to interrogate the link state of the
2363 *      hardware
2364 */
2365static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2366{
2367        struct velocity_info *vptr = netdev_priv(dev);
2368        struct mac_regs __iomem *regs = vptr->mac_regs;
2369        unsigned long flags;
2370        struct mii_ioctl_data *miidata = if_mii(ifr);
2371        int err;
2372
2373        switch (cmd) {
2374        case SIOCGMIIPHY:
2375                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2376                break;
2377        case SIOCGMIIREG:
2378                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2379                        return -ETIMEDOUT;
2380                break;
2381        case SIOCSMIIREG:
2382                spin_lock_irqsave(&vptr->lock, flags);
2383                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2384                spin_unlock_irqrestore(&vptr->lock, flags);
2385                check_connection_type(vptr->mac_regs);
2386                if (err)
2387                        return err;
2388                break;
2389        default:
2390                return -EOPNOTSUPP;
2391        }
2392        return 0;
2393}
2394
2395/**
2396 *      velocity_ioctl          -       ioctl entry point
2397 *      @dev: network device
2398 *      @rq: interface request ioctl
2399 *      @cmd: command code
2400 *
2401 *      Called when the user issues an ioctl request to the network
2402 *      device in question. The velocity interface supports MII.
2403 */
2404static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2405{
2406        struct velocity_info *vptr = netdev_priv(dev);
2407        int ret;
2408
2409        /* If we are asked for information and the device is power
2410           saving then we need to bring the device back up to talk to it */
2411
2412        if (!netif_running(dev))
2413                pci_set_power_state(vptr->pdev, PCI_D0);
2414
2415        switch (cmd) {
2416        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2417        case SIOCGMIIREG:       /* Read MII PHY register. */
2418        case SIOCSMIIREG:       /* Write to MII PHY register. */
2419                ret = velocity_mii_ioctl(dev, rq, cmd);
2420                break;
2421
2422        default:
2423                ret = -EOPNOTSUPP;
2424        }
2425        if (!netif_running(dev))
2426                pci_set_power_state(vptr->pdev, PCI_D3hot);
2427
2428
2429        return ret;
2430}
2431
2432/**
2433 *      velocity_get_stats      -       statistics callback
2434 *      @dev: network device
2435 *
2436 *      Callback from the network layer to allow driver statistics
2437 *      to be resynchronized with hardware collected state. In the
2438 *      case of the velocity we need to pull the MIB counters from
2439 *      the hardware into the counters before letting the network
2440 *      layer display them.
2441 */
2442static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2443{
2444        struct velocity_info *vptr = netdev_priv(dev);
2445
2446        /* If the hardware is down, don't touch MII */
2447        if (!netif_running(dev))
2448                return &dev->stats;
2449
2450        spin_lock_irq(&vptr->lock);
2451        velocity_update_hw_mibs(vptr);
2452        spin_unlock_irq(&vptr->lock);
2453
2454        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2455        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2456        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2457
2459        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2460        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2461
2462        /*
2463         * The remaining detailed counters (rx_dropped, rx_over_errors,
2464         * rx_frame_errors, rx_fifo_errors, rx_missed_errors and
2465         * tx_fifo_errors) have no hardware MIB equivalents and retain
2466         * whatever the driver has accumulated at runtime.
2467         */
2470
2471        return &dev->stats;
2472}
2473
2474/**
2475 *      velocity_close          -       close adapter callback
2476 *      @dev: network device
2477 *
2478 *      Callback from the network layer when the velocity interface is
2479 *      being brought down.
2480 */
2481static int velocity_close(struct net_device *dev)
2482{
2483        struct velocity_info *vptr = netdev_priv(dev);
2484
2485        napi_disable(&vptr->napi);
2486        netif_stop_queue(dev);
2487        velocity_shutdown(vptr);
2488
2489        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2490                velocity_get_ip(vptr);
2491
2492        free_irq(vptr->pdev->irq, dev);
2493
2494        velocity_free_rings(vptr);
2495
2496        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2497        return 0;
2498}
2499
2500/**
2501 *      velocity_xmit           -       transmit packet callback
2502 *      @skb: buffer to transmit
2503 *      @dev: network device
2504 *
2505 *      Called by the network layer to request that a packet be queued
2506 *      to the velocity. Returns zero on success.
2507 */
2508static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2509                                 struct net_device *dev)
2510{
2511        struct velocity_info *vptr = netdev_priv(dev);
2512        int qnum = 0;
2513        struct tx_desc *td_ptr;
2514        struct velocity_td_info *tdinfo;
2515        unsigned long flags;
2516        int pktlen;
2517        int index, prev;
2518        int i = 0;
2519
2520        if (skb_padto(skb, ETH_ZLEN))
2521                goto out;
2522
2523        /* The hardware can handle at most 7 memory segments, so merge
2524         * the skb if there are more */
2525        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2526                kfree_skb(skb);
2527                return NETDEV_TX_OK;
2528        }
2529
2530        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2531                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2532                                skb_headlen(skb);
2533
2534        spin_lock_irqsave(&vptr->lock, flags);
2535
2536        index = vptr->tx.curr[qnum];
2537        td_ptr = &(vptr->tx.rings[qnum][index]);
2538        tdinfo = &(vptr->tx.infos[qnum][index]);
2539
2540        td_ptr->tdesc1.TCR = TCR0_TIC;
2541        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2542
2543        /*
2544         *      Map the linear network buffer into PCI space and
2545         *      add it to the transmit ring.
2546         */
2547        tdinfo->skb = skb;
2548        tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2549        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2550        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2551        td_ptr->td_buf[0].pa_high = 0;
2552        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2553
2554        /* Handle fragments */
2555        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2556                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2557
2558                tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
2559                                                          frag, 0,
2560                                                          skb_frag_size(frag),
2561                                                          DMA_TO_DEVICE);
2562
2563                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2564                td_ptr->td_buf[i + 1].pa_high = 0;
2565                td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2566        }
2567        tdinfo->nskb_dma = i + 1;
2568
2569        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2570
2571        if (vlan_tx_tag_present(skb)) {
2572                td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2573                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2574        }
2575
2576        /*
2577         *      Handle hardware checksum
2578         */
2579        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2580                const struct iphdr *ip = ip_hdr(skb);
2581                if (ip->protocol == IPPROTO_TCP)
2582                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2583                else if (ip->protocol == IPPROTO_UDP)
2584                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2585                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2586        }
2587
2588        prev = index - 1;
2589        if (prev < 0)
2590                prev = vptr->options.numtx - 1;
2591        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2592        vptr->tx.used[qnum]++;
2593        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2594
2595        if (AVAIL_TD(vptr, qnum) < 1)
2596                netif_stop_queue(dev);
2597
2598        td_ptr = &(vptr->tx.rings[qnum][prev]);
2599        td_ptr->td_buf[0].size |= TD_QUEUE;
2600        mac_tx_queue_wake(vptr->mac_regs, qnum);
2601
2602        spin_unlock_irqrestore(&vptr->lock, flags);
2603out:
2604        return NETDEV_TX_OK;
2605}
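
/*
 * Descriptor chaining above, step by step: the new descriptor is built
 * with TD_QUEUE cleared in its first buffer, handed to the hardware via
 * OWNED_BY_NIC, and only then does the *previous* descriptor get
 * TD_QUEUE set before the queue is woken:
 *
 *      td[prev] (TD_QUEUE now set) --> td[index] (freshly owned by NIC)
 *
 * so the DMA engine can never walk onto a half-built descriptor.
 */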
2606
2607static const struct net_device_ops velocity_netdev_ops = {
2608        .ndo_open               = velocity_open,
2609        .ndo_stop               = velocity_close,
2610        .ndo_start_xmit         = velocity_xmit,
2611        .ndo_get_stats          = velocity_get_stats,
2612        .ndo_validate_addr      = eth_validate_addr,
2613        .ndo_set_mac_address    = eth_mac_addr,
2614        .ndo_set_rx_mode        = velocity_set_multi,
2615        .ndo_change_mtu         = velocity_change_mtu,
2616        .ndo_do_ioctl           = velocity_ioctl,
2617        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2618        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2619};
2620
2621/**
2622 *      velocity_init_info      -       init private data
2623 *      @pdev: PCI device
2624 *      @vptr: Velocity info
2625 *      @info: Board type
2626 *
2627 *      Set up the initial velocity_info struct for the device that has been
2628 *      discovered.
2629 */
2630static void __devinit velocity_init_info(struct pci_dev *pdev,
2631                                         struct velocity_info *vptr,
2632                                         const struct velocity_info_tbl *info)
2633{
2634        memset(vptr, 0, sizeof(struct velocity_info));
2635
2636        vptr->pdev = pdev;
2637        vptr->chip_id = info->chip_id;
2638        vptr->tx.numq = info->txqueue;
2639        vptr->multicast_limit = MCAM_SIZE;
2640        spin_lock_init(&vptr->lock);
2641}
2642
2643/**
2644 *      velocity_get_pci_info   -       retrieve PCI info for device
2645 *      @vptr: velocity device
2646 *      @pdev: PCI device it matches
2647 *
2648 *      Retrieve the PCI configuration space data that interests us from
2649 *      the kernel PCI layer
2650 */
2651static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2652{
2653        vptr->rev_id = pdev->revision;
2654
2655        pci_set_master(pdev);
2656
2657        vptr->ioaddr = pci_resource_start(pdev, 0);
2658        vptr->memaddr = pci_resource_start(pdev, 1);
2659
2660        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2661                dev_err(&pdev->dev,
2662                           "region #0 is not an I/O resource, aborting.\n");
2663                return -EINVAL;
2664        }
2665
2666        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2667                dev_err(&pdev->dev,
2668                           "region #1 is an I/O resource, aborting.\n");
2669                return -EINVAL;
2670        }
2671
2672        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2673                dev_err(&pdev->dev, "region #1 is too small.\n");
2674                return -EINVAL;
2675        }
2676        vptr->pdev = pdev;
2677
2678        return 0;
2679}
2680
2681/**
2682 *      velocity_print_info     -       per driver data
2683 *      @vptr: velocity
2684 *
2685 *      Print per driver data as the kernel driver finds Velocity
2686 *      hardware
2687 */
2688static void __devinit velocity_print_info(struct velocity_info *vptr)
2689{
2690        struct net_device *dev = vptr->dev;
2691
2692        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2693        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2694                dev->name, dev->dev_addr);
2695}
2696
2697static u32 velocity_get_link(struct net_device *dev)
2698{
2699        struct velocity_info *vptr = netdev_priv(dev);
2700        struct mac_regs __iomem *regs = vptr->mac_regs;
2701        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2702}
2703
2704/**
2705 *      velocity_found1         -       set up discovered velocity card
2706 *      @pdev: PCI device
2707 *      @ent: PCI device table entry that matched
2708 *
2709 *      Configure a discovered adapter from scratch. Return a negative
2710 *      errno code on failure paths.
2711 */
2712static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2713{
2714        static int first = 1;
2715        struct net_device *dev;
2716        int i;
2717        const char *drv_string;
2718        const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2719        struct velocity_info *vptr;
2720        struct mac_regs __iomem *regs;
2721        int ret = -ENOMEM;
2722
2723        /* FIXME: this driver, like almost all other ethernet drivers,
2724         * can support more than MAX_UNITS.
2725         */
2726        if (velocity_nics >= MAX_UNITS) {
2727                dev_notice(&pdev->dev, "already found %d NICs.\n",
2728                           velocity_nics);
2729                return -ENODEV;
2730        }
2731
2732        dev = alloc_etherdev(sizeof(struct velocity_info));
2733        if (!dev)
2734                goto out;
2735
2736        /* Chain it all together */
2737
2738        SET_NETDEV_DEV(dev, &pdev->dev);
2739        vptr = netdev_priv(dev);
2740
2741
2742        if (first) {
2743                printk(KERN_INFO "%s Ver. %s\n",
2744                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2745                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2746                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2747                first = 0;
2748        }
2749
2750        velocity_init_info(pdev, vptr, info);
2751
2752        vptr->dev = dev;
2753
2754        ret = pci_enable_device(pdev);
2755        if (ret < 0)
2756                goto err_free_dev;
2757
2758        ret = velocity_get_pci_info(vptr, pdev);
2759        if (ret < 0) {
2760                /* error message already printed */
2761                goto err_disable;
2762        }
2763
2764        ret = pci_request_regions(pdev, VELOCITY_NAME);
2765        if (ret < 0) {
2766                dev_err(&pdev->dev, "No PCI resources.\n");
2767                goto err_disable;
2768        }
2769
2770        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2771        if (regs == NULL) {
2772                ret = -EIO;
2773                goto err_release_res;
2774        }
2775
2776        vptr->mac_regs = regs;
2777
2778        mac_wol_reset(regs);
2779
2780        for (i = 0; i < 6; i++)
2781                dev->dev_addr[i] = readb(&regs->PAR[i]);
2782
2783
2784        drv_string = dev_driver_string(&pdev->dev);
2785
2786        velocity_get_options(&vptr->options, velocity_nics, drv_string);
2787
2788        /*
2789         *      Mask out the options that cannot be set on this chip
2790         */
2791
2792        vptr->options.flags &= info->flags;
2793
2794        /*
2795         *      Enable the chip-specified capabilities
2796         */
2797
2798        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2799
2800        vptr->wol_opts = vptr->options.wol_opts;
2801        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2802
2803        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2804
2805        dev->netdev_ops = &velocity_netdev_ops;
2806        dev->ethtool_ops = &velocity_ethtool_ops;
2807        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2808
2809        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
2810        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2811                NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2812
2813        ret = register_netdev(dev);
2814        if (ret < 0)
2815                goto err_iounmap;
2816
2817        if (!velocity_get_link(dev)) {
2818                netif_carrier_off(dev);
2819                vptr->mii_status |= VELOCITY_LINK_FAIL;
2820        }
2821
2822        velocity_print_info(vptr);
2823        pci_set_drvdata(pdev, dev);
2824
2825        /* and leave the chip powered down */
2826
2827        pci_set_power_state(pdev, PCI_D3hot);
2828        velocity_nics++;
2829out:
2830        return ret;
2831
2832err_iounmap:
2833        iounmap(regs);
2834err_release_res:
2835        pci_release_regions(pdev);
2836err_disable:
2837        pci_disable_device(pdev);
2838err_free_dev:
2839        free_netdev(dev);
2840        goto out;
2841}
2842
2843#ifdef CONFIG_PM
2844/**
2845 *      wol_calc_crc            -       WOL CRC
2846 *      @size: pattern length in 8-byte blocks (one mask byte per block)
2847 *      @pattern: data pattern
2848 *      @mask_pattern: mask
2849 *
2850 *      Compute the wake-on-LAN CRC hash for the packet header we are interested in.
2851 */
2852static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2853{
2854        u16 crc = 0xFFFF;
2855        u8 mask;
2856        int i, j;
2857
2858        for (i = 0; i < size; i++) {
2859                mask = mask_pattern[i];
2860
2861                /* Skip this loop if the mask equals to zero */
2862                if (mask == 0x00)
2863                        continue;
2864
2865                for (j = 0; j < 8; j++) {
2866                        if ((mask & 0x01) == 0) {
2867                                mask >>= 1;
2868                                continue;
2869                        }
2870                        mask >>= 1;
2871                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2872                }
2873        }
2874        /*      Finally, invert the result once to get the correct data */
2875        crc = ~crc;
2876        return bitrev32(crc) >> 16;
2877}
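
/*
 * Example of the masked CRC above: if mask_pattern[i] == 0x05 (binary
 * 00000101), only pattern bytes i*8 + 0 and i*8 + 2 of that 8-byte
 * block are fed to crc_ccitt(); the rest are skipped. The final
 * bitrev32(~crc) >> 16 is a 16-bit bit reversal, putting the hash in
 * the bit order the PatternCRC registers appear to expect.
 */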
2878
2879/**
2880 *      velocity_set_wol        -       set up for wake on lan
2881 *      @vptr: velocity to set WOL status on
2882 *
2883 *      Set a card up for wake on lan either by unicast or by
2884 *      ARP packet.
2885 *
2886 *      FIXME: check static buffer is safe here
2887 */
2888static int velocity_set_wol(struct velocity_info *vptr)
2889{
2890        struct mac_regs __iomem *regs = vptr->mac_regs;
2891        enum speed_opt spd_dpx = vptr->options.spd_dpx;
2892        static u8 buf[256];
2893        int i;
2894
2895        static u32 mask_pattern[2][4] = {
2896                {0x00203000, 0x000003C0, 0x00000000, 0x00000000}, /* ARP */
2897                {0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}  /* Magic Packet */
2898        };
2899
2900        writew(0xFFFF, &regs->WOLCRClr);
2901        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2902        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2903
2904        /*
2905           if (vptr->wol_opts & VELOCITY_WOL_PHY)
2906           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2907         */
2908
2909        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2910                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2911
2912        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2913                struct arp_packet *arp = (struct arp_packet *) buf;
2914                u16 crc;
2915                memset(buf, 0, sizeof(struct arp_packet) + 7);
2916
2917                for (i = 0; i < 4; i++)
2918                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2919
2920                arp->type = htons(ETH_P_ARP);
2921                arp->ar_op = htons(1);
2922
2923                memcpy(arp->ar_tip, vptr->ip_addr, 4);
2924
2925                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2926                                (u8 *)&mask_pattern[0][0]);
2927
2928                writew(crc, &regs->PatternCRC[0]);
2929                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2930        }
2931
2932        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2933        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2934
2935        writew(0x0FFF, &regs->WOLSRClr);
2936
2937        if (spd_dpx == SPD_DPX_1000_FULL)
2938                goto mac_done;
2939
2940        if (spd_dpx != SPD_DPX_AUTO)
2941                goto advertise_done;
2942
2943        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2944                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2945                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2946
2947                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2948        }
2949
2950        if (vptr->mii_status & VELOCITY_SPEED_1000)
2951                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2952
2953advertise_done:
2954        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2955
2956        {
2957                u8 GCR;
2958                GCR = readb(&regs->CHIPGCR);
2959                GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2960                writeb(GCR, &regs->CHIPGCR);
2961        }
2962
2963mac_done:
2964        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2965        /* Turn on SWPTAG just before entering power mode */
2966        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2967        /* Go to bed ..... */
2968        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2969
2970        return 0;
2971}
2972
2973/**
2974 *      velocity_save_context   -       save registers
2975 *      @vptr: velocity
2976 *      @context: buffer for stored context
2977 *
2978 *      Retrieve the current configuration from the velocity hardware
2979 *      and stash it in the context structure, for use by the context
2980 *      restore functions. This allows us to save things we need across
2981 *      power down states
2982 */
2983static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
2984{
2985        struct mac_regs __iomem *regs = vptr->mac_regs;
2986        u16 i;
2987        u8 __iomem *ptr = (u8 __iomem *)regs;
2988
2989        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
2990                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2991
2992        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
2993                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2994
2995        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
2996                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2997
2998}
2999
3000static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3001{
3002        struct net_device *dev = pci_get_drvdata(pdev);
3003        struct velocity_info *vptr = netdev_priv(dev);
3004        unsigned long flags;
3005
3006        if (!netif_running(vptr->dev))
3007                return 0;
3008
3009        netif_device_detach(vptr->dev);
3010
3011        spin_lock_irqsave(&vptr->lock, flags);
3012        pci_save_state(pdev);
3013
3014        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3015                velocity_get_ip(vptr);
3016                velocity_save_context(vptr, &vptr->context);
3017                velocity_shutdown(vptr);
3018                velocity_set_wol(vptr);
3019                pci_enable_wake(pdev, PCI_D3hot, 1);
3020                pci_set_power_state(pdev, PCI_D3hot);
3021        } else {
3022                velocity_save_context(vptr, &vptr->context);
3023                velocity_shutdown(vptr);
3024                pci_disable_device(pdev);
3025                pci_set_power_state(pdev, pci_choose_state(pdev, state));
3026        }
3027
3028        spin_unlock_irqrestore(&vptr->lock, flags);
3029        return 0;
3030}
3031
3032/**
3033 *      velocity_restore_context        -       restore registers
3034 *      @vptr: velocity
3035 *      @context: buffer for stored context
3036 *
3037 *      Reload the register configuration from the velocity context
3038 *      created by velocity_save_context.
3039 */
3040static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3041{
3042        struct mac_regs __iomem *regs = vptr->mac_regs;
3043        int i;
3044        u8 __iomem *ptr = (u8 __iomem *)regs;
3045
3046        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3047                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3048
3049        /* Just skip cr0 */
3050        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3051                /* Clear */
3052                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3053                /* Set */
3054                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3055        }
3056
3057        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3058                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3059
3060        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3061                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3062
3063        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3064                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3065}
3066
3067static int velocity_resume(struct pci_dev *pdev)
3068{
3069        struct net_device *dev = pci_get_drvdata(pdev);
3070        struct velocity_info *vptr = netdev_priv(dev);
3071        unsigned long flags;
3072        int i;
3073
3074        if (!netif_running(vptr->dev))
3075                return 0;
3076
3077        pci_set_power_state(pdev, PCI_D0);
3078        pci_enable_wake(pdev, PCI_D0, 0);
3079        pci_restore_state(pdev);
3080
3081        mac_wol_reset(vptr->mac_regs);
3082
3083        spin_lock_irqsave(&vptr->lock, flags);
3084        velocity_restore_context(vptr, &vptr->context);
3085        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3086        mac_disable_int(vptr->mac_regs);
3087
3088        velocity_tx_srv(vptr);
3089
3090        for (i = 0; i < vptr->tx.numq; i++) {
3091                if (vptr->tx.used[i])
3092                        mac_tx_queue_wake(vptr->mac_regs, i);
3093        }
3094
3095        mac_enable_int(vptr->mac_regs);
3096        spin_unlock_irqrestore(&vptr->lock, flags);
3097        netif_device_attach(vptr->dev);
3098
3099        return 0;
3100}
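
/*
 * Taken together, velocity_suspend() and velocity_resume() implement the
 * legacy PCI power management contract: suspend saves the register
 * context (and, when wake-on-LAN is enabled, arms the wake logic before
 * entering D3hot), while resume returns the device to D0, replays the
 * saved context, reclaims transmit descriptors that completed before the
 * suspend and wakes any queues that still hold work.
 */
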
3101#endif
3102
3103/*
3104 *      Definition of our device driver. The PCI layer interface
3105 *      uses this to handle discovery and hot plugging of our cards.
3106 */
3107static struct pci_driver velocity_driver = {
3108        .name           = VELOCITY_NAME,
3109        .id_table       = velocity_id_table,
3110        .probe          = velocity_found1,
3111        .remove         = __devexit_p(velocity_remove1),
3112#ifdef CONFIG_PM
3113        .suspend        = velocity_suspend,
3114        .resume         = velocity_resume,
3115#endif
3116};
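
/*
 * velocity_id_table, defined earlier in this file, is what the PCI core
 * matches against when binding the driver.  A typical entry looks
 * roughly like the sketch below; the device ID shown is illustrative,
 * see the real table for the authoritative values.
 */
#if 0  /* illustrative sketch only, not compiled */
static const struct pci_device_id velocity_id_table_sketch[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3119) },      /* VT612x family */
        { }                                             /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, velocity_id_table_sketch);
#endif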
3117
3119/**
3120 *      velocity_ethtool_up     -       pre hook for ethtool
3121 *      @dev: network device
3122 *
3123 *      Called before an ethtool operation. We need to make sure the
3124 *      chip is out of D3 state before we poke at it.
3125 */
3126static int velocity_ethtool_up(struct net_device *dev)
3127{
3128        struct velocity_info *vptr = netdev_priv(dev);
3129        if (!netif_running(dev))
3130                pci_set_power_state(vptr->pdev, PCI_D0);
3131        return 0;
3132}
3133
3134/**
3135 *      velocity_ethtool_down   -       post hook for ethtool
3136 *      @dev: network device
3137 *
3138 *      Called after an ethtool operation. Put the chip back into D3
3139 *      state if the interface is not running.
3140 */
3141static void velocity_ethtool_down(struct net_device *dev)
3142{
3143        struct velocity_info *vptr = netdev_priv(dev);
3144        if (!netif_running(dev))
3145                pci_set_power_state(vptr->pdev, PCI_D3hot);
3146}
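
/*
 * The ethtool core calls .begin before and .complete after every ethtool
 * operation, so the two helpers above bracket each request: running e.g.
 * "ethtool eth0" against a downed interface briefly raises the chip to
 * D0 and drops it back to D3hot afterwards.
 */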
3147
3148static int velocity_get_settings(struct net_device *dev,
3149                                 struct ethtool_cmd *cmd)
3150{
3151        struct velocity_info *vptr = netdev_priv(dev);
3152        struct mac_regs __iomem *regs = vptr->mac_regs;
3153        u32 status;
3154        status = check_connection_type(regs);
3155
3156        cmd->supported = SUPPORTED_TP |
3157                        SUPPORTED_Autoneg |
3158                        SUPPORTED_10baseT_Half |
3159                        SUPPORTED_10baseT_Full |
3160                        SUPPORTED_100baseT_Half |
3161                        SUPPORTED_100baseT_Full |
3162                        SUPPORTED_1000baseT_Half |
3163                        SUPPORTED_1000baseT_Full;
3164
3165        cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3166        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3167                cmd->advertising |=
3168                        ADVERTISED_10baseT_Half |
3169                        ADVERTISED_10baseT_Full |
3170                        ADVERTISED_100baseT_Half |
3171                        ADVERTISED_100baseT_Full |
3172                        ADVERTISED_1000baseT_Half |
3173                        ADVERTISED_1000baseT_Full;
3174        } else {
3175                switch (vptr->options.spd_dpx) {
3176                case SPD_DPX_1000_FULL:
3177                        cmd->advertising |= ADVERTISED_1000baseT_Full;
3178                        break;
3179                case SPD_DPX_100_HALF:
3180                        cmd->advertising |= ADVERTISED_100baseT_Half;
3181                        break;
3182                case SPD_DPX_100_FULL:
3183                        cmd->advertising |= ADVERTISED_100baseT_Full;
3184                        break;
3185                case SPD_DPX_10_HALF:
3186                        cmd->advertising |= ADVERTISED_10baseT_Half;
3187                        break;
3188                case SPD_DPX_10_FULL:
3189                        cmd->advertising |= ADVERTISED_10baseT_Full;
3190                        break;
3191                default:
3192                        break;
3193                }
3194        }
3195
3196        if (status & VELOCITY_SPEED_1000)
3197                ethtool_cmd_speed_set(cmd, SPEED_1000);
3198        else if (status & VELOCITY_SPEED_100)
3199                ethtool_cmd_speed_set(cmd, SPEED_100);
3200        else
3201                ethtool_cmd_speed_set(cmd, SPEED_10);
3202
3203        cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3204        cmd->port = PORT_TP;
3205        cmd->transceiver = XCVR_INTERNAL;
3206        cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3207
3208        if (status & VELOCITY_DUPLEX_FULL)
3209                cmd->duplex = DUPLEX_FULL;
3210        else
3211                cmd->duplex = DUPLEX_HALF;
3212
3213        return 0;
3214}
3215
3216static int velocity_set_settings(struct net_device *dev,
3217                                 struct ethtool_cmd *cmd)
3218{
3219        struct velocity_info *vptr = netdev_priv(dev);
3220        u32 speed = ethtool_cmd_speed(cmd);
3221        u32 curr_status;
3222        u32 new_status = 0;
3223        int ret = 0;
3224
3225        curr_status = check_connection_type(vptr->mac_regs);
3226        curr_status &= ~VELOCITY_LINK_FAIL;
3227
3228        new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3229        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3230        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3231        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3232        new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3233
3234        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3235            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3236                ret = -EINVAL;
3237        } else {
3238                enum speed_opt spd_dpx;
3239
3240                if (new_status & VELOCITY_AUTONEG_ENABLE)
3241                        spd_dpx = SPD_DPX_AUTO;
3242                else if ((new_status & VELOCITY_SPEED_1000) &&
3243                         (new_status & VELOCITY_DUPLEX_FULL)) {
3244                        spd_dpx = SPD_DPX_1000_FULL;
3245                } else if (new_status & VELOCITY_SPEED_100)
3246                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3247                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3248                else if (new_status & VELOCITY_SPEED_10)
3249                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3250                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3251                else
3252                        return -EOPNOTSUPP;
3253
3254                vptr->options.spd_dpx = spd_dpx;
3255
3256                velocity_set_media_mode(vptr, new_status);
3257        }
3258
3259        return ret;
3260}
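
/*
 * For reference, a forced-mode request such as
 *
 *      ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * arrives here with autoneg == AUTONEG_DISABLE, speed == SPEED_100 and
 * duplex == DUPLEX_FULL, which the code above folds into
 * SPD_DPX_100_FULL before calling velocity_set_media_mode().  The
 * interface name is illustrative.
 */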
3261
3262static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3263{
3264        struct velocity_info *vptr = netdev_priv(dev);
3265        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3266        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3267        strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
3268}
3269
3270static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3271{
3272        struct velocity_info *vptr = netdev_priv(dev);
3273        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3274        wol->wolopts |= WAKE_MAGIC;
3275        /*
3276         * if (vptr->wol_opts & VELOCITY_WOL_PHY)
3277         *      wol->wolopts |= WAKE_PHY;
3278         */
3279        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3280                wol->wolopts |= WAKE_UCAST;
3281        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3282                wol->wolopts |= WAKE_ARP;
3283        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3284}
3285
3286static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3287{
3288        struct velocity_info *vptr = netdev_priv(dev);
3289
3290        if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3291                return -EINVAL;
3292        vptr->wol_opts = VELOCITY_WOL_MAGIC;
3293
3294        /*
3295         * if (wol->wolopts & WAKE_PHY) {
3296         *      vptr->wol_opts |= VELOCITY_WOL_PHY;
3297         *      vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3298         * }
3299         */
3300
3301        if (wol->wolopts & WAKE_MAGIC) {
3302                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3303                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3304        }
3305        if (wol->wolopts & WAKE_UCAST) {
3306                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3307                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3308        }
3309        if (wol->wolopts & WAKE_ARP) {
3310                vptr->wol_opts |= VELOCITY_WOL_ARP;
3311                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3312        }
3313        memcpy(vptr->wol_passwd, wol->sopass, 6);
3314        return 0;
3315}
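
/*
 * For reference, magic-packet wake is typically requested with
 *
 *      ethtool -s eth0 wol g
 *
 * which lands here with wol->wolopts == WAKE_MAGIC and sets
 * VELOCITY_FLAGS_WOL_ENABLED, the flag velocity_suspend() tests before
 * arming the wake logic.  The interface name is illustrative.
 */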
3316
3317static u32 velocity_get_msglevel(struct net_device *dev)
3318{
3319        return msglevel;
3320}
3321
3322static void velocity_set_msglevel(struct net_device *dev, u32 value)
3323{
3324        msglevel = value;
3325}
3326
3327static int get_pending_timer_val(int val)
3328{
3329        int mult_bits = val >> 6;
3330        int mult = 1;
3331
3332        switch (mult_bits) {
3334        case 1:
3335                mult = 4; break;
3336        case 2:
3337                mult = 16; break;
3338        case 3:
3339                mult = 64; break;
3340        case 0:
3341        default:
3342                break;
3343        }
3344
3345        return (val & 0x3f) * mult;
3346}
3347
3348static void set_pending_timer_val(int *val, u32 us)
3349{
3350        u8 mult = 0;
3351        u8 shift = 0;
3352
3353        if (us >= 0x3f) {
3354                mult = 1; /* multiply by 4 */
3355                shift = 2;
3356        }
3357        if (us >= 0x3f * 4) {
3358                mult = 2; /* multiply by 16 */
3359                shift = 4;
3360        }
3361        if (us >= 0x3f * 16) {
3362                mult = 3; /* multiply by 64 */
3363                shift = 6;
3364        }
3365
3366        *val = (mult << 6) | ((us >> shift) & 0x3f);
3367}
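
/*
 * The two helpers above pack a microsecond count into the 6-bit value
 * plus 2-bit multiplier format used by the queue timer registers.  A
 * worked round trip with hypothetical values:
 */
#if 0  /* illustrative snippet only, not compiled */
        int reg;

        /* 160 >= 0x3f, so mult = 1 (x4) and value = 160 >> 2 = 40: reg = 0x68 */
        set_pending_timer_val(&reg, 160);
        BUG_ON(get_pending_timer_val(reg) != 160);
        /* The encoding is lossy: 161 also encodes to 0x68 and reads back as 160 */
#endif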
3368
3370static int velocity_get_coalesce(struct net_device *dev,
3371                struct ethtool_coalesce *ecmd)
3372{
3373        struct velocity_info *vptr = netdev_priv(dev);
3374
3375        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3376        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3377
3378        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3379        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3380
3381        return 0;
3382}
3383
3384static int velocity_set_coalesce(struct net_device *dev,
3385                struct ethtool_coalesce *ecmd)
3386{
3387        struct velocity_info *vptr = netdev_priv(dev);
3388        int max_us = 0x3f * 64;
3389        unsigned long flags;
3390
3391        /* 6 bits of timer value plus a 2-bit multiplier: max is 0x3f * 64 usecs */
3392        if (ecmd->tx_coalesce_usecs > max_us)
3393                return -EINVAL;
3394        if (ecmd->rx_coalesce_usecs > max_us)
3395                return -EINVAL;
3396
3397        if (ecmd->tx_max_coalesced_frames > 0xff)
3398                return -EINVAL;
3399        if (ecmd->rx_max_coalesced_frames > 0xff)
3400                return -EINVAL;
3401
3402        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3403        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3404
3405        set_pending_timer_val(&vptr->options.rxqueue_timer,
3406                        ecmd->rx_coalesce_usecs);
3407        set_pending_timer_val(&vptr->options.txqueue_timer,
3408                        ecmd->tx_coalesce_usecs);
3409
3410        /* Set up the interrupt suppression and queue timers */
3411        spin_lock_irqsave(&vptr->lock, flags);
3412        mac_disable_int(vptr->mac_regs);
3413        setup_adaptive_interrupts(vptr);
3414        setup_queue_timers(vptr);
3415
3416        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3417        mac_clear_isr(vptr->mac_regs);
3418        mac_enable_int(vptr->mac_regs);
3419        spin_unlock_irqrestore(&vptr->lock, flags);
3420
3421        return 0;
3422}
3423
3424static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3425        "rx_all",
3426        "rx_ok",
3427        "tx_ok",
3428        "rx_error",
3429        "rx_runt_ok",
3430        "rx_runt_err",
3431        "rx_64",
3432        "tx_64",
3433        "rx_65_to_127",
3434        "tx_65_to_127",
3435        "rx_128_to_255",
3436        "tx_128_to_255",
3437        "rx_256_to_511",
3438        "tx_256_to_511",
3439        "rx_512_to_1023",
3440        "tx_512_to_1023",
3441        "rx_1024_to_1518",
3442        "tx_1024_to_1518",
3443        "tx_ether_collisions",
3444        "rx_crc_errors",
3445        "rx_jumbo",
3446        "tx_jumbo",
3447        "rx_mac_control_frames",
3448        "tx_mac_control_frames",
3449        "rx_frame_alignment_errors",
3450        "rx_long_ok",
3451        "rx_long_err",
3452        "tx_sqe_errors",
3453        "rx_no_buf",
3454        "rx_symbol_errors",
3455        "in_range_length_errors",
3456        "late_collisions"
3457};
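
/*
 * The order of velocity_gstrings must match the layout of
 * vptr->mib_counter, which velocity_update_hw_mibs() fills from the
 * hardware MIB counters (see the HW_MIB enumeration in via-velocity.h):
 * velocity_get_ethtool_stats() below copies the counters out
 * positionally, with no per-name lookup.
 */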
3458
3459static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3460{
3461        switch (sset) {
3462        case ETH_SS_STATS:
3463                memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3464                break;
3465        }
3466}
3467
3468static int velocity_get_sset_count(struct net_device *dev, int sset)
3469{
3470        switch (sset) {
3471        case ETH_SS_STATS:
3472                return ARRAY_SIZE(velocity_gstrings);
3473        default:
3474                return -EOPNOTSUPP;
3475        }
3476}
3477
3478static void velocity_get_ethtool_stats(struct net_device *dev,
3479                                       struct ethtool_stats *stats, u64 *data)
3480{
3481        if (netif_running(dev)) {
3482                struct velocity_info *vptr = netdev_priv(dev);
3483                u32 *p = vptr->mib_counter;
3484                int i;
3485
3486                spin_lock_irq(&vptr->lock);
3487                velocity_update_hw_mibs(vptr);
3488                spin_unlock_irq(&vptr->lock);
3489
3490                for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3491                        *data++ = *p++;
3492        }
3493}
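
/*
 * "ethtool -S <interface>" is the usual path into the two callbacks
 * above: get_sset_count() sizes the request and get_ethtool_stats()
 * fills it.  The counters are only refreshed while the interface is
 * running, as the netif_running() check above shows.
 */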
3494
3495static const struct ethtool_ops velocity_ethtool_ops = {
3496        .get_settings           = velocity_get_settings,
3497        .set_settings           = velocity_set_settings,
3498        .get_drvinfo            = velocity_get_drvinfo,
3499        .get_wol                = velocity_ethtool_get_wol,
3500        .set_wol                = velocity_ethtool_set_wol,
3501        .get_msglevel           = velocity_get_msglevel,
3502        .set_msglevel           = velocity_set_msglevel,
3503        .get_link               = velocity_get_link,
3504        .get_strings            = velocity_get_strings,
3505        .get_sset_count         = velocity_get_sset_count,
3506        .get_ethtool_stats      = velocity_get_ethtool_stats,
3507        .get_coalesce           = velocity_get_coalesce,
3508        .set_coalesce           = velocity_set_coalesce,
3509        .begin                  = velocity_ethtool_up,
3510        .complete               = velocity_ethtool_down
3511};
3512
3513#if defined(CONFIG_PM) && defined(CONFIG_INET)
3514static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3515{
3516        struct in_ifaddr *ifa = ptr;
3517        struct net_device *dev = ifa->ifa_dev->dev;
3518
3519        if (dev_net(dev) == &init_net &&
3520            dev->netdev_ops == &velocity_netdev_ops)
3521                velocity_get_ip(netdev_priv(dev));
3522
3523        return NOTIFY_DONE;
3524}
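
/*
 * This notifier lets velocity_get_ip() cache the interface's IPv4
 * address whenever it changes; velocity_suspend() relies on that cached
 * address to program the ARP wake-up pattern when wake-on-ARP is
 * enabled.
 */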
3525
3526static struct notifier_block velocity_inetaddr_notifier = {
3527        .notifier_call  = velocity_netdev_event,
3528};
3529
3530static void velocity_register_notifier(void)
3531{
3532        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3533}
3534
3535static void velocity_unregister_notifier(void)
3536{
3537        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3538}
3539
3540#else
3541
3542#define velocity_register_notifier()    do {} while (0)
3543#define velocity_unregister_notifier()  do {} while (0)
3544
3545#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3546
3547/**
3548 *      velocity_init_module    -       load time function
3549 *
3550 *      Called when the velocity module is loaded. The PCI driver
3551 *      is registered with the PCI layer, and in turn will call
3552 *      is registered with the PCI layer, which will in turn call
3553 *      in the system.
3554 */
3555static int __init velocity_init_module(void)
3556{
3557        int ret;
3558
3559        velocity_register_notifier();
3560        ret = pci_register_driver(&velocity_driver);
3561        if (ret < 0)
3562                velocity_unregister_notifier();
3563        return ret;
3564}
3565
3566/**
3567 *      velocity_cleanup        -       module unload
3568 *
3569 *      Called when the velocity module is unloaded. It cleans up
3570 *      the notifiers and unregisters the PCI driver interface for
3571 *      this hardware, which in turn tears down all discovered
3572 *      interfaces before the function returns.
3573 */
3574static void __exit velocity_cleanup_module(void)
3575{
3576        velocity_unregister_notifier();
3577        pci_unregister_driver(&velocity_driver);
3578}
3579
3580module_init(velocity_init_module);
3581module_exit(velocity_cleanup_module);
3582