linux/drivers/net/ethernet/via/via-velocity.c
/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *      rx_copybreak/alignment
 *      More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

/**
 *      mac_get_cam_mask        -       Read a CAM mask
 *      @regs: register block for this velocity
 *      @mask: buffer to store mask
 *
 *      Fetch the mask bits of the selected CAM and store them into the
 *      provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam_mask        -       Set a CAM mask
 *      @regs: register block for this velocity
 *      @mask: CAM mask to load
 *
 *      Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam     -       set CAM data
 *      @regs: register block of this velocity
 *      @idx: CAM index
 *      @addr: 2 or 6 bytes of CAM data
 *
 *      Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

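        /* The CAM bank holds 64 entries, so wrap the index into range */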
        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{
        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *      mac_wol_reset   -       reset WOL after exiting low power
 *      @regs: register block of this velocity
 *
 *      Called after we drop out of wake on lan mode in order to
 *      reset the Wake on lan features. This function does not restore
 *      the rest of the chip logic after a sleep/wakeup cycle.
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{
        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);
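
/*
 * For illustration: VELOCITY_PARAM(RxDescriptors, "Number of receive
 * descriptors") expands to
 *
 *      static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      module_param_array(RxDescriptors, int, NULL, 0);
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * giving each option a per-adapter array that can be set at load time.
 */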

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
      In some environments the IP header must be DWORD byte aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode
   5: indicates 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF     0
/* ValPktLen[] is used for setting the checksum offload ability of NIC.
   0: Receive frames with invalid layer 2 length (Default)
   1: Drop frames with invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if link status changes on/off.
   2: Wake up if an arp packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed up to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
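
/*
 * Example (illustrative, not from the original source): loading the
 * module with "wol_opts=6" combines options 2 and 4 above, waking the
 * machine on either a received ARP packet or any unicast packet.
 */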

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
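
/*
 * Note (added): on receive, frames shorter than rx_copybreak are copied
 * into a freshly allocated skb so that the full sized, 64 byte aligned
 * receive buffer can be handed straight back to the hardware.
 */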

/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */
static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 *      get_chip_name   -       identifier to name
 *      @chip_id: chip identifier
 *
 *      Given a chip identifier return a suitable description. Returns
 *      a pointer to a static string valid while the driver is loaded.
 */
static const char *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 *      velocity_remove1        -       device unplug
 *      @pdev: PCI device being removed
 *
 *      Device unload callback. Called on an unplug or on module
 *      unload for each active device that is present. Disconnects
 *      the device from the network layer and frees all the resources.
 */
static void velocity_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct velocity_info *vptr = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(vptr->mac_regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        velocity_nics--;
}

/**
 *      velocity_set_int_opt    -       parser for integer options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @min: lowest value allowed
 *      @max: highest value allowed
 *      @def: default value
 *      @name: property name
 *      @devname: device name
 *
 *      Set an integer property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
                                 char *name, const char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                                        devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                                        devname, name, val);
                *opt = val;
        }
}

/**
 *      velocity_set_bool_opt   -       parser for boolean options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @def: default value (yes/no)
 *      @flag: numeric value to set for true.
 *      @name: property name
 *      @devname: device name
 *
 *      Set a boolean property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
                                  char *name, const char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 *      velocity_get_options    -       set options on device
 *      @opts: option structure for the device
 *      @index: index of option to use in module options array
 *      @devname: device name
 *
 *      Turn the module and command options into a single structure
 *      for the current device.
 */
static void velocity_get_options(struct velocity_opt *opts, int index,
                                 const char *devname)
{
        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
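        /*
         * Round the RX descriptor count down to a multiple of 4: the
         * hardware hands RX descriptors back to the NIC in blocks of
         * four (see velocity_give_many_rx_descs below).
         */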
        opts->numrx = (opts->numrx & ~3);
}

/**
 *      velocity_init_cam_filter        -       initialise CAM
 *      @vptr: velocity to program
 *
 *      Initialize the content addressable memory used for filters. Load
 *      appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        unsigned int vid, i = 0;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
                mac_set_vlan_cam(regs, i, (u8 *) &vid);
                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                if (++i >= VCAM_SIZE)
                        break;
        }
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}

static int velocity_vlan_rx_add_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static int velocity_vlan_rx_kill_vid(struct net_device *dev,
                                     __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *      velocity_rx_reset       -       handle a receive reset
 *      @vptr: velocity we are resetting
 *
 *      Reset the ownership and status for the receive ring side.
 *      Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         *      Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *      velocity_get_opt_media_mode     -       get media selection
 *      @vptr: velocity adapter
 *
 *      Get the media mode stored in EEPROM or module options and load
 *      mii_status accordingly. The requested link state information
 *      is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        case SPD_DPX_1000_FULL:
                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 *      safe_disable_mii_autopoll       -       autopoll off
 *      @regs: velocity registers
 *
 *      Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /* turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      enable_mii_autopoll     -       turn on autopolling
 *      @regs: velocity registers
 *
 *      Enable the MII link status autopoll feature on the Velocity
 *      hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      velocity_mii_read       -       read MII data
 *      @regs: velocity registers
 *      @index: MII register index
 *      @data: buffer for received data
 *
 *      Perform a single read of an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 *      mii_check_media_mode    -       check media state
 *      @regs: velocity registers
 *
 *      Check the current MII status and determine the link status
 *      accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_mii_write      -       write MII data
 *      @regs: velocity registers
 *      @mii_addr: MII register index
 *      @data: 16bit data for the MII register
 *
 *      Perform a single write to an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 *      set_mii_flow_control    -       flow control setup
 *      @vptr: velocity interface
 *
 *      Set up the flow control on this interface according to
 *      the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
        }
}

/**
 *      mii_set_auto_on         -       autonegotiate on
 *      @vptr: velocity
 *
 *      Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;
        PHYSR0 = readb(&regs->PHYSR0);

        /*
           if (!(PHYSR0 & PHYSR0_LINKGD))
           status|=VELOCITY_LINK_FAIL;
         */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_set_media_mode         -       set media mode
 *      @vptr: velocity adapter
 *      @mii_status: old MII link state
 *
 *      Check the media link state and configure the flow control
 *      PHY and also velocity hardware setup accordingly. In particular
 *      we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        u32 curr_status;
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        /*
           Check if new status is consistent with current status
           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
               (mii_status==curr_status)) {
           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
           vptr->mii_status=check_connection_type(vptr->mac_regs);
           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
           return 0;
           }
         */

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

        /*
         *      If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in halfduplex mode
                 *    and enable it in fullduplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);

                if (mii_status & VELOCITY_SPEED_1000)
                        CHIPGCR |= CHIPGCR_FCGMII;
                else
                        CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
                if ((mii_status & VELOCITY_SPEED_1000) &&
                    (mii_status & VELOCITY_DUPLEX_FULL)) {
                        CTRL1000 |= ADVERTISE_1000FULL;
                }
                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
                } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
                                ANAR |= ADVERTISE_10HALF;
                }
                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 *      velocity_print_link_status      -       link status reporting
 *      @vptr: velocity to report on
 *
 *      Turn the link status of the velocity card into a kernel log
 *      description of the new link state, detailing speed and duplex
 *      status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{
        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
                        break;
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                default:
                        break;
                }
        }
}

/**
 *      enable_flow_control_ability     -       flow control
 *      @vptr: velocity to configure
 *
 *      Set up flow control according to the flow control options
 *      determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }
}

/**
 *      velocity_soft_reset     -       soft reset
 *      @vptr: velocity to reset
 *
 *      Kick off a soft reset of the velocity adapter and then poll
 *      until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 *      velocity_set_multi      -       filter list change callback
 *      @dev: network device
 *
 *      Called by the network layer when the filter lists need to change
 *      for a velocity adapter. Reload the CAMs with the new address
 *      filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask);

                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
}

/*
 * MII access, media link mode setting functions
 */

/**
 *      mii_init        -       set up MII
 *      @vptr: velocity adapter
 *      @mii_status: link status
 *
 *      Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn it
                 *      off in NWay-forced half mode for the NWay-forced vs.
                 *      legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn it
                 *      off in NWay-forced half mode for the NWay-forced vs.
                 *      legacy-forced issue
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *      Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
        if (BMCR & BMCR_ISOLATE) {
                BMCR &= ~BMCR_ISOLATE;
                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers   -       Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}

/**
 *      velocity_init_registers -       initialise MAC registers
 *      @vptr: velocity to init
 *      @type: type of initialisation (hot or cold)
 *
 *      Initialise the MAC on a reset or on first set up on the
 *      hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(vptr->dev);

                /*
                 *      Reset RX to keep the RX pointer on a 4X boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                                                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *      Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                mac_eeprom_reload(regs);
                for (i = 0; i < 6; i++)
                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));

                /*
                 *      clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *      Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *      Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(vptr->dev);

                /*
                 *      Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(vptr->dev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * RD number must be equal to 4X per hardware spec
         * (programming guide rev 1.20, p.13)
         */
        if (vptr->rx.filled < 4)
                return;

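        /* Ensure prior descriptor writes are visible to the device before
         * ownership of the ring entries is handed back to the NIC. */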
        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}

/**
 *      velocity_init_dma_rings -       set up DMA rings
 *      @vptr: Velocity to set up
 *
 *      Allocate PCI mapped DMA rings for the receive and transmit layer
 *      to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
        struct velocity_opt *opt = &vptr->options;
        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
        struct pci_dev *pdev = vptr->pdev;
        dma_addr_t pool_dma;
        void *pool;
        unsigned int i;

        /*
         * Allocate all RD/TD rings in a single pool.
         *
         * pci_alloc_consistent() fulfills the requirement for 64 byte
         * alignment.
         */
        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
                                    rx_ring_size, &pool_dma);
        if (!pool) {
                dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
                        vptr->dev->name);
                return -ENOMEM;
        }

1481        vptr->rx.ring = pool;
1482        vptr->rx.pool_dma = pool_dma;
1483
1484        pool += rx_ring_size;
1485        pool_dma += rx_ring_size;
1486
1487        for (i = 0; i < vptr->tx.numq; i++) {
1488                vptr->tx.rings[i] = pool;
1489                vptr->tx.pool_dma[i] = pool_dma;
1490                pool += tx_ring_size;
1491                pool_dma += tx_ring_size;
1492        }
1493
1494        return 0;
1495}
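/*
 * Layout of the single coherent allocation built above (sketch for a
 * two queue adapter; tx.numq may be 1 on other chips):
 *
 *   pool/pool_dma --> +----------------------------+
 *                     | numrx * sizeof(rx_desc)    |  rx.ring
 *                     +----------------------------+
 *                     | numtx * sizeof(tx_desc)    |  tx.rings[0]
 *                     +----------------------------+
 *                     | numtx * sizeof(tx_desc)    |  tx.rings[1]
 *                     +----------------------------+
 *
 * One allocation and one dma_addr_t cover every ring; the per queue
 * pointers are simply offsets carved out of the pool.
 */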
1496
1497static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1498{
1499        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1500}
1501
1502/**
1503 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1504 *      @vptr: velocity
1505 *      @idx: ring index
1506 *
1507 *      Allocate a new full sized buffer for the reception of a frame and
1508 *      map it into PCI space for the hardware to use. The hardware
1509 *      requires *64* byte alignment of the buffer which makes life
1510 *      less fun than would be ideal.
1511 */
1512static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1513{
1514        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1515        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1516
1517        rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
1518        if (rd_info->skb == NULL)
1519                return -ENOMEM;
1520
1521        /*
1522         *      Do the gymnastics to get the buffer head for data at
1523         *      64-byte alignment.
1524         */
1525        skb_reserve(rd_info->skb,
1526                        64 - ((unsigned long) rd_info->skb->data & 63));
1527        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1528                                        vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1529
1530        /*
1531         *      Fill in the descriptor to match
1532         */
1533
1534        *((u32 *)&rd->rdesc0) = 0;
1535        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1536        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1537        rd->pa_high = 0;
1538        return 0;
1539}
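/*
 * Worked alignment example (illustrative addresses): if skb->data comes
 * back at ...0x1234, then 0x1234 & 63 = 52 and skb_reserve(skb, 64 - 52)
 * advances data by 12 bytes to ...0x1240, the next 64-byte boundary.
 * The extra 64 bytes requested from netdev_alloc_skb() guarantee the
 * reservation always fits.
 */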
1540
1541
1542static int velocity_rx_refill(struct velocity_info *vptr)
1543{
1544        int dirty = vptr->rx.dirty, done = 0;
1545
1546        do {
1547                struct rx_desc *rd = vptr->rx.ring + dirty;
1548
1549                /* Fine for an all zero Rx desc at init time as well */
1550                if (rd->rdesc0.len & OWNED_BY_NIC)
1551                        break;
1552
1553                if (!vptr->rx.info[dirty].skb) {
1554                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1555                                break;
1556                }
1557                done++;
1558                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1559        } while (dirty != vptr->rx.curr);
1560
1561        if (done) {
1562                vptr->rx.dirty = dirty;
1563                vptr->rx.filled += done;
1564        }
1565
1566        return done;
1567}
1568
1569/**
1570 *      velocity_free_rd_ring   -       free receive ring
1571 *      @vptr: velocity to clean up
1572 *
1573 *      Free the receive buffers for each ring slot and any
1574 *      attached socket buffers that need to go away.
1575 */
1576static void velocity_free_rd_ring(struct velocity_info *vptr)
1577{
1578        int i;
1579
1580        if (vptr->rx.info == NULL)
1581                return;
1582
1583        for (i = 0; i < vptr->options.numrx; i++) {
1584                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1585                struct rx_desc *rd = vptr->rx.ring + i;
1586
1587                memset(rd, 0, sizeof(*rd));
1588
1589                if (!rd_info->skb)
1590                        continue;
1591                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1592                                 PCI_DMA_FROMDEVICE);
1593                rd_info->skb_dma = 0;
1594
1595                dev_kfree_skb(rd_info->skb);
1596                rd_info->skb = NULL;
1597        }
1598
1599        kfree(vptr->rx.info);
1600        vptr->rx.info = NULL;
1601}
1602
1603/**
1604 *      velocity_init_rd_ring   -       set up receive ring
1605 *      @vptr: velocity to configure
1606 *
1607 *      Allocate and set up the receive buffers for each ring slot and
1608 *      assign them to the network adapter.
1609 */
1610static int velocity_init_rd_ring(struct velocity_info *vptr)
1611{
1612        int ret = -ENOMEM;
1613
1614        vptr->rx.info = kcalloc(vptr->options.numrx,
1615                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1616        if (!vptr->rx.info)
1617                goto out;
1618
1619        velocity_init_rx_ring_indexes(vptr);
1620
1621        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1622                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1623                        "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1624                velocity_free_rd_ring(vptr);
1625                goto out;
1626        }
1627
1628        ret = 0;
1629out:
1630        return ret;
1631}
1632
1633/**
1634 *      velocity_init_td_ring   -       set up transmit ring
1635 *      @vptr:  velocity
1636 *
1637 *      Set up the transmit ring and chain the ring pointers together.
1638 *      Returns zero on success or a negative posix errno code for
1639 *      failure.
1640 */
1641static int velocity_init_td_ring(struct velocity_info *vptr)
1642{
1643        int j;
1644
1645        /* Init the TD ring entries */
1646        for (j = 0; j < vptr->tx.numq; j++) {
1647
1648                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1649                                            sizeof(struct velocity_td_info),
1650                                            GFP_KERNEL);
1651                if (!vptr->tx.infos[j]) {
1652                        while (--j >= 0)
1653                                kfree(vptr->tx.infos[j]);
1654                        return -ENOMEM;
1655                }
1656
1657                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1658        }
1659        return 0;
1660}
1661
1662/**
1663 *      velocity_free_dma_rings -       free PCI ring pointers
1664 *      @vptr: Velocity to free from
1665 *
1666 *      Clean up the PCI ring buffers allocated to this velocity.
1667 */
1668static void velocity_free_dma_rings(struct velocity_info *vptr)
1669{
1670        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1671                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1672
1673        pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1674}
1675
1676static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1677{
1678        int ret;
1679
1680        velocity_set_rxbufsize(vptr, mtu);
1681
1682        ret = velocity_init_dma_rings(vptr);
1683        if (ret < 0)
1684                goto out;
1685
1686        ret = velocity_init_rd_ring(vptr);
1687        if (ret < 0)
1688                goto err_free_dma_rings_0;
1689
1690        ret = velocity_init_td_ring(vptr);
1691        if (ret < 0)
1692                goto err_free_rd_ring_1;
1693out:
1694        return ret;
1695
1696err_free_rd_ring_1:
1697        velocity_free_rd_ring(vptr);
1698err_free_dma_rings_0:
1699        velocity_free_dma_rings(vptr);
1700        goto out;
1701}
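/*
 * Note that the unwind labels above mirror the allocation order: a TD
 * ring failure frees the RD ring and then the DMA pool, so each err_*
 * label undoes exactly the steps that succeeded before it.
 */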
1702
1703/**
1704 *      velocity_free_tx_buf    -       free transmit buffer
1705 *      @vptr: velocity
1706 *      @tdinfo: buffer
1707 *
1708 *      Release a transmit buffer. If the buffer was preallocated then
1709 *      recycle it, if not then unmap the buffer.
1710 */
1711static void velocity_free_tx_buf(struct velocity_info *vptr,
1712                struct velocity_td_info *tdinfo, struct tx_desc *td)
1713{
1714        struct sk_buff *skb = tdinfo->skb;
1715
1716        /*
1717         *      Don't unmap the pre-allocated tx_bufs
1718         */
1719        if (tdinfo->skb_dma) {
1720                int i;
1721
1722                for (i = 0; i < tdinfo->nskb_dma; i++) {
1723                        size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1724
1725                        /* For scatter-gather */
1726                        if (skb_shinfo(skb)->nr_frags > 0)
1727                                pktlen = max_t(size_t, pktlen,
1728                                                le16_to_cpu(td->td_buf[i].size & ~TD_QUEUE));
1729
1730                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1731                                        pktlen, PCI_DMA_TODEVICE);
1732                }
1733        }
1734        dev_kfree_skb_irq(skb);
1735        tdinfo->skb = NULL;
1736}
1737
1738/*
1739 *      FIXME: could we merge this with velocity_free_tx_buf ?
1740 */
1741static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1742                                                         int q, int n)
1743{
1744        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1745        int i;
1746
1747        if (td_info == NULL)
1748                return;
1749
1750        if (td_info->skb) {
1751                for (i = 0; i < td_info->nskb_dma; i++) {
1752                        if (td_info->skb_dma[i]) {
1753                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1754                                        td_info->skb->len, PCI_DMA_TODEVICE);
1755                                td_info->skb_dma[i] = 0;
1756                        }
1757                }
1758                dev_kfree_skb(td_info->skb);
1759                td_info->skb = NULL;
1760        }
1761}
1762
1763/**
1764 *      velocity_free_td_ring   -       free td ring
1765 *      @vptr: velocity
1766 *
1767 *      Free up the transmit ring for this particular velocity adapter.
1768 *      We free the ring contents but not the ring itself.
1769 */
1770static void velocity_free_td_ring(struct velocity_info *vptr)
1771{
1772        int i, j;
1773
1774        for (j = 0; j < vptr->tx.numq; j++) {
1775                if (vptr->tx.infos[j] == NULL)
1776                        continue;
1777                for (i = 0; i < vptr->options.numtx; i++)
1778                        velocity_free_td_ring_entry(vptr, j, i);
1779
1780                kfree(vptr->tx.infos[j]);
1781                vptr->tx.infos[j] = NULL;
1782        }
1783}
1784
1785static void velocity_free_rings(struct velocity_info *vptr)
1786{
1787        velocity_free_td_ring(vptr);
1788        velocity_free_rd_ring(vptr);
1789        velocity_free_dma_rings(vptr);
1790}
1791
1792/**
1793 *      velocity_error  -       handle error from controller
1794 *      @vptr: velocity
1795 *      @status: card status
1796 *
1797 *      Process an error report from the hardware and attempt to recover
1798 *      the card itself. At the moment we cannot recover from some
1799 *      theoretically impossible errors but this could be fixed using
1800 *      the pci_device_failed logic to bounce the hardware.
1801 *
1802 */
1803static void velocity_error(struct velocity_info *vptr, int status)
1804{
1805
1806        if (status & ISR_TXSTLI) {
1807                struct mac_regs __iomem *regs = vptr->mac_regs;
1808
1809                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1810                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1811                writew(TRDCSR_RUN, &regs->TDCSRClr);
1812                netif_stop_queue(vptr->dev);
1813
1814                /* FIXME: port over the pci_device_failed code and use it
1815                   here */
1816        }
1817
1818        if (status & ISR_SRCI) {
1819                struct mac_regs __iomem *regs = vptr->mac_regs;
1820                int linked;
1821
1822                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1823                        vptr->mii_status = check_connection_type(regs);
1824
1825                        /*
1826                         *      If it is a 3119, disable frame bursting in
1827                         *      half-duplex mode and enable it in
1828                         *      full-duplex mode
1829                         */
1830                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1831                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1832                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1833                                else
1834                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1835                        }
1836                        /*
1837                         *      Only enable CD heart beat counter in 10HD mode
1838                         */
1839                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1840                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1841                        else
1842                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1843
1844                        setup_queue_timers(vptr);
1845                }
1846                /*
1847                 *      Get link status from PHYSR0
1848                 */
1849                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1850
1851                if (linked) {
1852                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1853                        netif_carrier_on(vptr->dev);
1854                } else {
1855                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1856                        netif_carrier_off(vptr->dev);
1857                }
1858
1859                velocity_print_link_status(vptr);
1860                enable_flow_control_ability(vptr);
1861
1862                /*
1863                 *      Re-enable auto-polling because SRCI will disable
1864                 *      auto-polling
1865                 */
1866
1867                enable_mii_autopoll(regs);
1868
1869                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1870                        netif_stop_queue(vptr->dev);
1871                else
1872                        netif_wake_queue(vptr->dev);
1873
1874        }
1875        if (status & ISR_MIBFI)
1876                velocity_update_hw_mibs(vptr);
1877        if (status & ISR_LSTEI)
1878                mac_rx_queue_wake(vptr->mac_regs);
1879}
1880
1881/**
1882 *      velocity_tx_srv         -       transmit interrupt service
1883 *      @vptr: velocity
1884 *
1885 *      Scan the queues looking for transmitted packets that
1886 *      we can complete and clean up. Update any statistics as
1887 *      necessary.
1888 */
1889static int velocity_tx_srv(struct velocity_info *vptr)
1890{
1891        struct tx_desc *td;
1892        int qnum;
1893        int full = 0;
1894        int idx;
1895        int works = 0;
1896        struct velocity_td_info *tdinfo;
1897        struct net_device_stats *stats = &vptr->dev->stats;
1898
1899        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1900                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1901                        idx = (idx + 1) % vptr->options.numtx) {
1902
1903                        /*
1904                         *      Get Tx Descriptor
1905                         */
1906                        td = &(vptr->tx.rings[qnum][idx]);
1907                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1908
1909                        if (td->tdesc0.len & OWNED_BY_NIC)
1910                                break;
1911
1912                        if (works++ > 15)
1913                                break;
1914
1915                        if (td->tdesc0.TSR & TSR0_TERR) {
1916                                stats->tx_errors++;
1917                                stats->tx_dropped++;
1918                                if (td->tdesc0.TSR & TSR0_CDH)
1919                                        stats->tx_heartbeat_errors++;
1920                                if (td->tdesc0.TSR & TSR0_CRS)
1921                                        stats->tx_carrier_errors++;
1922                                if (td->tdesc0.TSR & TSR0_ABT)
1923                                        stats->tx_aborted_errors++;
1924                                if (td->tdesc0.TSR & TSR0_OWC)
1925                                        stats->tx_window_errors++;
1926                        } else {
1927                                stats->tx_packets++;
1928                                stats->tx_bytes += tdinfo->skb->len;
1929                        }
1930                        velocity_free_tx_buf(vptr, tdinfo, td);
1931                        vptr->tx.used[qnum]--;
1932                }
1933                vptr->tx.tail[qnum] = idx;
1934
1935                if (AVAIL_TD(vptr, qnum) < 1)
1936                        full = 1;
1937        }
1938        /*
1939         *      Look to see if we should kick the transmit network
1940         *      layer for more work.
1941         */
1942        if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1943            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1944                netif_wake_queue(vptr->dev);
1945        }
1946        return works;
1947}
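/*
 * The works++ > 15 cut-off above bounds completion handling to roughly
 * 16 descriptors per call across all queues, so a burst of completed
 * transmits cannot monopolise a NAPI poll; any leftovers are reaped on
 * the next service pass.
 */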
1948
1949/**
1950 *      velocity_rx_csum        -       checksum process
1951 *      @rd: receive packet descriptor
1952 *      @skb: network layer packet buffer
1953 *
1954 *      Process the status bits for the received packet and determine
1955 *      if the checksum was computed and verified by the hardware
1956 */
1957static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1958{
1959        skb_checksum_none_assert(skb);
1960
1961        if (rd->rdesc1.CSM & CSM_IPKT) {
1962                if (rd->rdesc1.CSM & CSM_IPOK) {
1963                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1964                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1965                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1966                                        return;
1967                        }
1968                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1969                }
1970        }
1971}
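/*
 * Decision table for the CSM bits tested above:
 *
 *   CSM_IPKT  CSM_IPOK  CSM_TCPKT/CSM_UDPKT  CSM_TUPOK  ip_summed
 *      0         x              x                x      NONE
 *      1         0              x                x      NONE
 *      1         1              0                x      UNNECESSARY
 *      1         1              1                0      NONE
 *      1         1              1                1      UNNECESSARY
 */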
1972
1973/**
1974 *      velocity_rx_copy        -       in place Rx copy for small packets
1975 *      @rx_skb: network layer packet buffer candidate
1976 *      @pkt_size: received data size
1977 *      @vptr: velocity adapter
1979 *
1980 *      Replace the current skb that is scheduled for Rx processing by a
1981 *      shorter, immediately allocated skb, if the received packet is small
1982 *      enough. This function returns a negative value if the received
1983 *      packet is too big or if memory is exhausted.
1984 */
1985static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1986                            struct velocity_info *vptr)
1987{
1988        int ret = -1;
1989        if (pkt_size < rx_copybreak) {
1990                struct sk_buff *new_skb;
1991
1992                new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1993                if (new_skb) {
1994                        new_skb->ip_summed = rx_skb[0]->ip_summed;
1995                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1996                        *rx_skb = new_skb;
1997                        ret = 0;
1998                }
1999
2000        }
2001        return ret;
2002}
2003
2004/**
2005 *      velocity_iph_realign    -       IP header alignment
2006 *      @vptr: velocity we are handling
2007 *      @skb: network layer packet buffer
2008 *      @pkt_size: received data size
2009 *
2010 *      Align IP header on a 2 bytes boundary. This behavior can be
2011 *      configured by the user.
2012 */
2013static inline void velocity_iph_realign(struct velocity_info *vptr,
2014 *      Align the IP header on a 2-byte boundary. This behavior can be
2015{
2016        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2017                memmove(skb->data + 2, skb->data, pkt_size);
2018                skb_reserve(skb, 2);
2019        }
2020}
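/*
 * Why the 2-byte shift: the 14-byte Ethernet header leaves the IP
 * header 2 bytes short of a 4-byte boundary when the frame starts on
 * the DMA-friendly 64-byte boundary, so the frame is copied up by two
 * bytes to realign it, trading a memmove() for aligned IP accesses.
 * Hence the opt-in flag.
 */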
2021
2022/**
2023 *      velocity_receive_frame  -       received packet processor
2024 *      @vptr: velocity we are handling
2025 *      @idx: ring index
2026 *
2027 *      A packet has arrived. We process the packet and if appropriate
2028 *      pass the frame up the network stack
2029 */
2030static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2031{
2032        void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2033        struct net_device_stats *stats = &vptr->dev->stats;
2034        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2035        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2036        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2037        struct sk_buff *skb;
2038
2039        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2040                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR "%s: the received frame spans multiple RDs.\n", vptr->dev->name);
2041                stats->rx_length_errors++;
2042                return -EINVAL;
2043        }
2044
2045        if (rd->rdesc0.RSR & RSR_MAR)
2046                stats->multicast++;
2047
2048        skb = rd_info->skb;
2049
2050        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2051                                    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2052
2053        /*
2054         *      Drop frame not meeting IEEE 802.3
2055         */
2056
2057        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2058                if (rd->rdesc0.RSR & RSR_RL) {
2059                        stats->rx_length_errors++;
2060                        return -EINVAL;
2061                }
2062        }
2063
2064        pci_action = pci_dma_sync_single_for_device;
2065
2066        velocity_rx_csum(rd, skb);
2067
2068        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2069                velocity_iph_realign(vptr, skb, pkt_len);
2070                pci_action = pci_unmap_single;
2071                rd_info->skb = NULL;
2072        }
2073
2074        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2075                   PCI_DMA_FROMDEVICE);
2076
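        /* The length reported by the NIC includes the trailing 4-byte FCS; strip it */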
2077        skb_put(skb, pkt_len - 4);
2078        skb->protocol = eth_type_trans(skb, vptr->dev);
2079
2080        if (rd->rdesc0.RSR & RSR_DETAG) {
2081                u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2082
2083                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2084        }
2085        netif_rx(skb);
2086
2087        stats->rx_bytes += pkt_len;
2088        stats->rx_packets++;
2089
2090        return 0;
2091}
2092
2093/**
2094 *      velocity_rx_srv         -       service RX interrupt
2095 *      @vptr: velocity
2096 *
2097 *      Walk the receive ring of the velocity adapter and remove
2098 *      any received packets from the receive queue. Hand the ring
2099 *      slots back to the adapter for reuse.
2100 */
2101static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2102{
2103        struct net_device_stats *stats = &vptr->dev->stats;
2104        int rd_curr = vptr->rx.curr;
2105        int works = 0;
2106
2107        while (works < budget_left) {
2108                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2109
2110                if (!vptr->rx.info[rd_curr].skb)
2111                        break;
2112
2113                if (rd->rdesc0.len & OWNED_BY_NIC)
2114                        break;
2115
2116                rmb();
2117
2118                /*
2119                 *      Don't drop frames with CE or RL errors even though RXOK is off
2120                 */
2121                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2122                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2123                                stats->rx_dropped++;
2124                } else {
2125                        if (rd->rdesc0.RSR & RSR_CRC)
2126                                stats->rx_crc_errors++;
2127                        if (rd->rdesc0.RSR & RSR_FAE)
2128                                stats->rx_frame_errors++;
2129
2130                        stats->rx_dropped++;
2131                }
2132
2133                rd->size |= RX_INTEN;
2134
2135                rd_curr++;
2136                if (rd_curr >= vptr->options.numrx)
2137                        rd_curr = 0;
2138                works++;
2139        }
2140
2141        vptr->rx.curr = rd_curr;
2142
2143        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2144                velocity_give_many_rx_descs(vptr);
2145
2146        VAR_USED(stats);
2147        return works;
2148}
2149
2150static int velocity_poll(struct napi_struct *napi, int budget)
2151{
2152        struct velocity_info *vptr = container_of(napi,
2153                        struct velocity_info, napi);
2154        unsigned int rx_done;
2155        unsigned long flags;
2156
2157        spin_lock_irqsave(&vptr->lock, flags);
2158        /*
2159         * Do rx and tx twice for performance (taken from the VIA
2160         * out-of-tree driver).
2161         */
2162        rx_done = velocity_rx_srv(vptr, budget / 2);
2163        velocity_tx_srv(vptr);
2164        rx_done += velocity_rx_srv(vptr, budget - rx_done);
2165        velocity_tx_srv(vptr);
2166
2167        /* If budget not fully consumed, exit the polling mode */
2168        if (rx_done < budget) {
2169                napi_complete(napi);
2170                mac_enable_int(vptr->mac_regs);
2171        }
2172        spin_unlock_irqrestore(&vptr->lock, flags);
2173
2174        return rx_done;
2175}
2176
2177/**
2178 *      velocity_intr           -       interrupt callback
2179 *      @irq: interrupt number
2180 *      @dev_instance: interrupting device
2181 *
2182 *      Called whenever an interrupt is generated by the velocity
2183 *      adapter IRQ line. We may not be the source of the interrupt
2184 *      and need to identify initially if we are, and if not exit as
2185 *      efficiently as possible.
2186 */
2187static irqreturn_t velocity_intr(int irq, void *dev_instance)
2188{
2189        struct net_device *dev = dev_instance;
2190        struct velocity_info *vptr = netdev_priv(dev);
2191        u32 isr_status;
2192
2193        spin_lock(&vptr->lock);
2194        isr_status = mac_read_isr(vptr->mac_regs);
2195
2196        /* Not us ? */
2197        if (isr_status == 0) {
2198                spin_unlock(&vptr->lock);
2199                return IRQ_NONE;
2200        }
2201
2202        /* Ack the interrupt */
2203        mac_write_isr(vptr->mac_regs, isr_status);
2204
2205        if (likely(napi_schedule_prep(&vptr->napi))) {
2206                mac_disable_int(vptr->mac_regs);
2207                __napi_schedule(&vptr->napi);
2208        }
2209
2210        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2211                velocity_error(vptr, isr_status);
2212
2213        spin_unlock(&vptr->lock);
2214
2215        return IRQ_HANDLED;
2216}
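/*
 * Interrupt flow of the handler above: the ISR is read and acknowledged
 * in one step, the Rx/Tx work bits (ISR_PRXI, ISR_PPRXI, ISR_PTXI,
 * ISR_PPTXI) are deferred to velocity_poll() via NAPI with chip
 * interrupts masked, and anything outside those four bits is handled
 * synchronously as an error or link event in velocity_error().
 */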
2217
2218/**
2219 *      velocity_open           -       interface activation callback
2220 *      @dev: network layer device to open
2221 *
2222 *      Called when the network layer brings the interface up. Returns
2223 *      a negative posix error code on failure, or zero on success.
2224 *
2225 *      All the ring allocation and set up is done on open for this
2226 *      adapter to minimise memory usage when inactive
2227 */
2228static int velocity_open(struct net_device *dev)
2229{
2230        struct velocity_info *vptr = netdev_priv(dev);
2231        int ret;
2232
2233        ret = velocity_init_rings(vptr, dev->mtu);
2234        if (ret < 0)
2235                goto out;
2236
2237        /* Ensure chip is running */
2238        pci_set_power_state(vptr->pdev, PCI_D0);
2239
2240        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2241
2242        ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2243                          dev->name, dev);
2244        if (ret < 0) {
2245                /* Power down the chip */
2246                pci_set_power_state(vptr->pdev, PCI_D3hot);
2247                velocity_free_rings(vptr);
2248                goto out;
2249        }
2250
2251        velocity_give_many_rx_descs(vptr);
2252
2253        mac_enable_int(vptr->mac_regs);
2254        netif_start_queue(dev);
2255        napi_enable(&vptr->napi);
2256        vptr->flags |= VELOCITY_FLAGS_OPENED;
2257out:
2258        return ret;
2259}
2260
2261/**
2262 *      velocity_shutdown       -       shut down the chip
2263 *      @vptr: velocity to deactivate
2264 *
2265 *      Shuts down the internal operations of the velocity and
2266 *      disables interrupts, autopolling, transmit and receive
2267 */
2268static void velocity_shutdown(struct velocity_info *vptr)
2269{
2270        struct mac_regs __iomem *regs = vptr->mac_regs;
2271        mac_disable_int(regs);
2272        writel(CR0_STOP, &regs->CR0Set);
2273        writew(0xFFFF, &regs->TDCSRClr);
2274        writeb(0xFF, &regs->RDCSRClr);
2275        safe_disable_mii_autopoll(regs);
2276        mac_clear_isr(regs);
2277}
2278
2279/**
2280 *      velocity_change_mtu     -       MTU change callback
2281 *      @dev: network device
2282 *      @new_mtu: desired MTU
2283 *
2284 *      Handle requests from the networking layer for an MTU change on
2285 *      this interface.
2286 *      Return zero for success or negative posix error code.
2287 */
2288static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2289{
2290        struct velocity_info *vptr = netdev_priv(dev);
2291        int ret = 0;
2292
2293        if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2294                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2295                                vptr->dev->name);
2296                ret = -EINVAL;
2297                goto out_0;
2298        }
2299
2300        if (!netif_running(dev)) {
2301                dev->mtu = new_mtu;
2302                goto out_0;
2303        }
2304
2305        if (dev->mtu != new_mtu) {
2306                struct velocity_info *tmp_vptr;
2307                unsigned long flags;
2308                struct rx_info rx;
2309                struct tx_info tx;
2310
2311                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2312                if (!tmp_vptr) {
2313                        ret = -ENOMEM;
2314                        goto out_0;
2315                }
2316
2317                tmp_vptr->dev = dev;
2318                tmp_vptr->pdev = vptr->pdev;
2319                tmp_vptr->options = vptr->options;
2320                tmp_vptr->tx.numq = vptr->tx.numq;
2321
2322                ret = velocity_init_rings(tmp_vptr, new_mtu);
2323                if (ret < 0)
2324                        goto out_free_tmp_vptr_1;
2325
2326                spin_lock_irqsave(&vptr->lock, flags);
2327
2328                netif_stop_queue(dev);
2329                velocity_shutdown(vptr);
2330
2331                rx = vptr->rx;
2332                tx = vptr->tx;
2333
2334                vptr->rx = tmp_vptr->rx;
2335                vptr->tx = tmp_vptr->tx;
2336
2337                tmp_vptr->rx = rx;
2338                tmp_vptr->tx = tx;
2339
2340                dev->mtu = new_mtu;
2341
2342                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2343
2344                velocity_give_many_rx_descs(vptr);
2345
2346                mac_enable_int(vptr->mac_regs);
2347                netif_start_queue(dev);
2348
2349                spin_unlock_irqrestore(&vptr->lock, flags);
2350
2351                velocity_free_rings(tmp_vptr);
2352
2353out_free_tmp_vptr_1:
2354                kfree(tmp_vptr);
2355        }
2356out_0:
2357        return ret;
2358}
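/*
 * MTU change strategy used above: instead of resizing the live rings,
 * a throwaway velocity_info is given freshly sized rings, the rx/tx
 * bookkeeping is swapped with the real device under the lock while the
 * chip is shut down, and the old rings are freed through the temporary
 * structure outside the critical section.
 */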
2359
2360/**
2361 *      velocity_mii_ioctl              -       MII ioctl handler
2362 *      @dev: network device
2363 *      @ifr: the ifreq block for the ioctl
2364 *      @cmd: the command
2365 *
2366 *      Process MII requests made via ioctl from the network layer. These
2367 *      are used by tools like kudzu to interrogate the link state of the
2368 *      hardware
2369 */
2370static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2371{
2372        struct velocity_info *vptr = netdev_priv(dev);
2373        struct mac_regs __iomem *regs = vptr->mac_regs;
2374        unsigned long flags;
2375        struct mii_ioctl_data *miidata = if_mii(ifr);
2376        int err;
2377
2378        switch (cmd) {
2379        case SIOCGMIIPHY:
2380                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2381                break;
2382        case SIOCGMIIREG:
2383                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2384                        return -ETIMEDOUT;
2385                break;
2386        case SIOCSMIIREG:
2387                spin_lock_irqsave(&vptr->lock, flags);
2388                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2389                spin_unlock_irqrestore(&vptr->lock, flags);
2390                check_connection_type(vptr->mac_regs);
2391                if (err)
2392                        return err;
2393                break;
2394        default:
2395                return -EOPNOTSUPP;
2396        }
2397        return 0;
2398}
2399
2400/**
2401 *      velocity_ioctl          -       ioctl entry point
2402 *      @dev: network device
2403 *      @rq: interface request ioctl
2404 *      @cmd: command code
2405 *
2406 *      Called when the user issues an ioctl request to the network
2407 *      device in question. The velocity interface supports MII.
2408 */
2409static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2410{
2411        struct velocity_info *vptr = netdev_priv(dev);
2412        int ret;
2413
2414        /* If we are asked for information and the device is power
2415           saving, we need to bring the device back up to talk to it */
2416
2417        if (!netif_running(dev))
2418                pci_set_power_state(vptr->pdev, PCI_D0);
2419
2420        switch (cmd) {
2421        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2422        case SIOCGMIIREG:       /* Read MII PHY register. */
2423        case SIOCSMIIREG:       /* Write to MII PHY register. */
2424                ret = velocity_mii_ioctl(dev, rq, cmd);
2425                break;
2426
2427        default:
2428                ret = -EOPNOTSUPP;
2429        }
2430        if (!netif_running(dev))
2431                pci_set_power_state(vptr->pdev, PCI_D3hot);
2432
2433
2434        return ret;
2435}
2436
2437/**
2438 *      velocity_get_stats      -       statistics callback
2439 *      @dev: network device
2440 *
2441 *      Callback from the network layer to allow driver statistics
2442 *      to be resynchronized with hardware collected state. In the
2443 *      case of the velocity we need to pull the MIB counters from
2444 *      the hardware into the counters before letting the network
2445 *      layer display them.
2446 */
2447static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2448{
2449        struct velocity_info *vptr = netdev_priv(dev);
2450
2451        /* If the hardware is down, don't touch MII */
2452        if (!netif_running(dev))
2453                return &dev->stats;
2454
2455        spin_lock_irq(&vptr->lock);
2456        velocity_update_hw_mibs(vptr);
2457        spin_unlock_irq(&vptr->lock);
2458
2459        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2460        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2461        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2462
2463        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2464        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2465
2466        /*
2467         * Counters not collected from the hardware MIBs: rx_dropped,
2468         * rx_over_errors, rx_frame_errors, rx_fifo_errors,
2469         * rx_missed_errors and the detailed tx_errors breakdown
2470         * (e.g. tx_fifo_errors).
2471         */
2472
2476        return &dev->stats;
2477}
2478
2479/**
2480 *      velocity_close          -       close adapter callback
2481 *      @dev: network device
2482 *
2483 *      Callback from the network layer when the velocity is being
2484 *      deactivated by the network layer
2485 */
2486static int velocity_close(struct net_device *dev)
2487{
2488        struct velocity_info *vptr = netdev_priv(dev);
2489
2490        napi_disable(&vptr->napi);
2491        netif_stop_queue(dev);
2492        velocity_shutdown(vptr);
2493
2494        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2495                velocity_get_ip(vptr);
2496
2497        free_irq(vptr->pdev->irq, dev);
2498
2499        velocity_free_rings(vptr);
2500
2501        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2502        return 0;
2503}
2504
2505/**
2506 *      velocity_xmit           -       transmit packet callback
2507 *      @skb: buffer to transmit
2508 *      @dev: network device
2509 *
2510 *      Called by the network layer to request a packet is queued to
2511 *      the velocity. Returns zero on success.
2512 */
2513static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2514                                 struct net_device *dev)
2515{
2516        struct velocity_info *vptr = netdev_priv(dev);
2517        int qnum = 0;
2518        struct tx_desc *td_ptr;
2519        struct velocity_td_info *tdinfo;
2520        unsigned long flags;
2521        int pktlen;
2522        int index, prev;
2523        int i = 0;
2524
2525        if (skb_padto(skb, ETH_ZLEN))
2526                goto out;
2527
2528        /* The hardware can handle at most 7 memory segments, so merge
2529         * the skb if there are more */
2530        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2531                kfree_skb(skb);
2532                return NETDEV_TX_OK;
2533        }
2534
2535        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2536                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2537                                skb_headlen(skb);
2538
2539        spin_lock_irqsave(&vptr->lock, flags);
2540
2541        index = vptr->tx.curr[qnum];
2542        td_ptr = &(vptr->tx.rings[qnum][index]);
2543        tdinfo = &(vptr->tx.infos[qnum][index]);
2544
2545        td_ptr->tdesc1.TCR = TCR0_TIC;
2546        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2547
2548        /*
2549         *      Map the linear network buffer into PCI space and
2550         *      add it to the transmit ring.
2551         */
2552        tdinfo->skb = skb;
2553        tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2554        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2555        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2556        td_ptr->td_buf[0].pa_high = 0;
2557        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2558
2559        /* Handle fragments */
2560        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562
2563                tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
2564                                                          frag, 0,
2565                                                          skb_frag_size(frag),
2566                                                          DMA_TO_DEVICE);
2567
2568                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2569                td_ptr->td_buf[i + 1].pa_high = 0;
2570                td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2571        }
2572        tdinfo->nskb_dma = i + 1;
2573
2574        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2575
2576        if (vlan_tx_tag_present(skb)) {
2577                td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2578                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2579        }
2580
2581        /*
2582         *      Handle hardware checksum
2583         */
2584        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2585                const struct iphdr *ip = ip_hdr(skb);
2586                if (ip->protocol == IPPROTO_TCP)
2587                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2588                else if (ip->protocol == IPPROTO_UDP)
2589                        td_ptr->tdesc1.TCR |= TCR0_UDPCK;
2590                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2591        }
2592
2593        prev = index - 1;
2594        if (prev < 0)
2595                prev = vptr->options.numtx - 1;
2596        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2597        vptr->tx.used[qnum]++;
2598        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2599
2600        if (AVAIL_TD(vptr, qnum) < 1)
2601                netif_stop_queue(dev);
2602
2603        td_ptr = &(vptr->tx.rings[qnum][prev]);
2604        td_ptr->td_buf[0].size |= TD_QUEUE;
2605        mac_tx_queue_wake(vptr->mac_regs, qnum);
2606
2607        spin_unlock_irqrestore(&vptr->lock, flags);
2608out:
2609        return NETDEV_TX_OK;
2610}
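/*
 * Descriptor hand-off ordering in velocity_xmit() (sketch): the new
 * descriptor is fully written with TD_QUEUE clear on its first buffer,
 * ownership is flipped to the NIC, and only then is TD_QUEUE set on the
 * previous descriptor before mac_tx_queue_wake() kicks the queue.
 * Chaining through a descriptor is thus enabled only once that
 * descriptor is complete.
 */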
2611
2612static const struct net_device_ops velocity_netdev_ops = {
2613        .ndo_open               = velocity_open,
2614        .ndo_stop               = velocity_close,
2615        .ndo_start_xmit         = velocity_xmit,
2616        .ndo_get_stats          = velocity_get_stats,
2617        .ndo_validate_addr      = eth_validate_addr,
2618        .ndo_set_mac_address    = eth_mac_addr,
2619        .ndo_set_rx_mode        = velocity_set_multi,
2620        .ndo_change_mtu         = velocity_change_mtu,
2621        .ndo_do_ioctl           = velocity_ioctl,
2622        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2623        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2624};
2625
2626/**
2627 *      velocity_init_info      -       init private data
2628 *      @pdev: PCI device
2629 *      @vptr: Velocity info
2630 *      @info: Board type
2631 *
2632 *      Set up the initial velocity_info struct for the device that has been
2633 *      discovered.
2634 */
2635static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
2636                               const struct velocity_info_tbl *info)
2637{
2638        memset(vptr, 0, sizeof(struct velocity_info));
2639
2640        vptr->pdev = pdev;
2641        vptr->chip_id = info->chip_id;
2642        vptr->tx.numq = info->txqueue;
2643        vptr->multicast_limit = MCAM_SIZE;
2644        spin_lock_init(&vptr->lock);
2645}
2646
2647/**
2648 *      velocity_get_pci_info   -       retrieve PCI info for device
2649 *      @vptr: velocity device
2650 *      @pdev: PCI device it matches
2651 *
2652 *      Retrieve the PCI configuration space data that interests us from
2653 *      the kernel PCI layer
2654 */
2655static int velocity_get_pci_info(struct velocity_info *vptr,
2656                                 struct pci_dev *pdev)
2657{
2658        vptr->rev_id = pdev->revision;
2659
2660        pci_set_master(pdev);
2661
2662        vptr->ioaddr = pci_resource_start(pdev, 0);
2663        vptr->memaddr = pci_resource_start(pdev, 1);
2664
2665        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2666                dev_err(&pdev->dev,
2667                           "region #0 is not an I/O resource, aborting.\n");
2668                return -EINVAL;
2669        }
2670
2671        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2672                dev_err(&pdev->dev,
2673                           "region #1 is an I/O resource, aborting.\n");
2674                return -EINVAL;
2675        }
2676
2677        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2678                dev_err(&pdev->dev, "region #1 is too small.\n");
2679                return -EINVAL;
2680        }
2681        vptr->pdev = pdev;
2682
2683        return 0;
2684}
2685
2686/**
2687 *      velocity_print_info     -       per device information
2688 *      @vptr: velocity
2689 *
2690 *      Print the per device data (chip name and Ethernet address) as
2691 *      the kernel driver finds Velocity hardware
2692 */
2693static void velocity_print_info(struct velocity_info *vptr)
2694{
2695        struct net_device *dev = vptr->dev;
2696
2697        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2698        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2699                dev->name, dev->dev_addr);
2700}
2701
2702static u32 velocity_get_link(struct net_device *dev)
2703{
2704        struct velocity_info *vptr = netdev_priv(dev);
2705        struct mac_regs __iomem *regs = vptr->mac_regs;
2706        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2707}
2708
2709/**
2710 *      velocity_found1         -       set up discovered velocity card
2711 *      @pdev: PCI device
2712 *      @ent: PCI device table entry that matched
2713 *
2714 *      Configure a discovered adapter from scratch. Return a negative
2715 *      errno error code on failure paths.
2716 */
2717static int velocity_found1(struct pci_dev *pdev,
2718                           const struct pci_device_id *ent)
2719{
2720        static int first = 1;
2721        struct net_device *dev;
2722        int i;
2723        const char *drv_string;
2724        const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2725        struct velocity_info *vptr;
2726        struct mac_regs __iomem *regs;
2727        int ret = -ENOMEM;
2728
2729        /* FIXME: this driver, like almost all other ethernet drivers,
2730         * can support more than MAX_UNITS.
2731         */
2732        if (velocity_nics >= MAX_UNITS) {
2733                dev_notice(&pdev->dev, "already found %d NICs.\n",
2734                           velocity_nics);
2735                return -ENODEV;
2736        }
2737
2738        dev = alloc_etherdev(sizeof(struct velocity_info));
2739        if (!dev)
2740                goto out;
2741
2742        /* Chain it all together */
2743
2744        SET_NETDEV_DEV(dev, &pdev->dev);
2745        vptr = netdev_priv(dev);
2746
2747
2748        if (first) {
2749                printk(KERN_INFO "%s Ver. %s\n",
2750                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2751                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2752                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2753                first = 0;
2754        }
2755
2756        velocity_init_info(pdev, vptr, info);
2757
2758        vptr->dev = dev;
2759
2760        ret = pci_enable_device(pdev);
2761        if (ret < 0)
2762                goto err_free_dev;
2763
2764        ret = velocity_get_pci_info(vptr, pdev);
2765        if (ret < 0) {
2766                /* error message already printed */
2767                goto err_disable;
2768        }
2769
2770        ret = pci_request_regions(pdev, VELOCITY_NAME);
2771        if (ret < 0) {
2772                dev_err(&pdev->dev, "No PCI resources.\n");
2773                goto err_disable;
2774        }
2775
2776        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2777        if (regs == NULL) {
2778                ret = -EIO;
2779                goto err_release_res;
2780        }
2781
2782        vptr->mac_regs = regs;
2783
2784        mac_wol_reset(regs);
2785
2786        for (i = 0; i < 6; i++)
2787                dev->dev_addr[i] = readb(&regs->PAR[i]);
2788
2789
2790        drv_string = dev_driver_string(&pdev->dev);
2791
2792        velocity_get_options(&vptr->options, velocity_nics, drv_string);
2793
2794        /*
2795         *      Mask out the options that cannot be set on this chip
2796         */
2797
2798        vptr->options.flags &= info->flags;
2799
2800        /*
2801         *      Enable the chip-specific capabilities
2802         */
2803
2804        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2805
2806        vptr->wol_opts = vptr->options.wol_opts;
2807        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2808
2809        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2810
2811        dev->netdev_ops = &velocity_netdev_ops;
2812        dev->ethtool_ops = &velocity_ethtool_ops;
2813        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2814
2815        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2816                           NETIF_F_HW_VLAN_CTAG_TX;
2817        dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER |
2818                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM;
2819
2820        ret = register_netdev(dev);
2821        if (ret < 0)
2822                goto err_iounmap;
2823
2824        if (!velocity_get_link(dev)) {
2825                netif_carrier_off(dev);
2826                vptr->mii_status |= VELOCITY_LINK_FAIL;
2827        }
2828
2829        velocity_print_info(vptr);
2830        pci_set_drvdata(pdev, dev);
2831
2832        /* and leave the chip powered down */
2833
2834        pci_set_power_state(pdev, PCI_D3hot);
2835        velocity_nics++;
2836out:
2837        return ret;
2838
2839err_iounmap:
2840        iounmap(regs);
2841err_release_res:
2842        pci_release_regions(pdev);
2843err_disable:
2844        pci_disable_device(pdev);
2845err_free_dev:
2846        free_netdev(dev);
2847        goto out;
2848}
2849
2850#ifdef CONFIG_PM
2851/**
2852 *      wol_calc_crc            -       WOL CRC
2853 *      @size: number of mask bytes (one mask bit per pattern byte)
2854 *      @pattern: data pattern
2855 *      @mask_pattern: mask
2856 *
2857 *      Compute the wake on LAN CRC hashes for the packet header
2858 *      we are interested in.
2858 */
2859static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2860{
2861        u16 crc = 0xFFFF;
2862        u8 mask;
2863        int i, j;
2864
2865        for (i = 0; i < size; i++) {
2866                mask = mask_pattern[i];
2867
2868                /* Skip this byte if its mask is zero */
2869                if (mask == 0x00)
2870                        continue;
2871
2872                for (j = 0; j < 8; j++) {
2873                        if ((mask & 0x01) == 0) {
2874                                mask >>= 1;
2875                                continue;
2876                        }
2877                        mask >>= 1;
2878                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2879                }
2880        }
2881        /*      Finally, invert the result once to get the correct data */
2882        crc = ~crc;
2883        return bitrev32(crc) >> 16;
2884}
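/*
 * Example (illustrative): a mask byte of 0xC0 at index i folds only
 * pattern bytes i*8 + 6 and i*8 + 7 into the CCITT CRC, i.e. each mask
 * bit selects one pattern byte. bitrev32(crc) >> 16 then returns the
 * 16-bit result bit-reversed, presumably the bit ordering the pattern
 * match engine expects in PatternCRC.
 */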
2885
2886/**
2887 *      velocity_set_wol        -       set up for wake on lan
2888 *      @vptr: velocity to set WOL status on
2889 *
2890 *      Set a card up for wake on lan either by unicast or by
2891 *      ARP packet.
2892 *
2893 *      FIXME: check static buffer is safe here
2894 */
2895static int velocity_set_wol(struct velocity_info *vptr)
2896{
2897        struct mac_regs __iomem *regs = vptr->mac_regs;
2898        enum speed_opt spd_dpx = vptr->options.spd_dpx;
2899        static u8 buf[256];
2900        int i;
2901
2902        static u32 mask_pattern[2][4] = {
2903                {0x00203000, 0x000003C0, 0x00000000, 0x00000000}, /* ARP */
2904                {0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}  /* Magic Packet */
2905        };
2906
2907        writew(0xFFFF, &regs->WOLCRClr);
2908        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2909        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2910
2911        /*
2912           if (vptr->wol_opts & VELOCITY_WOL_PHY)
2913           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2914         */
2915
2916        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2917                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2918
2919        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2920                struct arp_packet *arp = (struct arp_packet *) buf;
2921                u16 crc;
2922                memset(buf, 0, sizeof(struct arp_packet) + 7);
2923
2924                for (i = 0; i < 4; i++)
2925                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2926
2927                arp->type = htons(ETH_P_ARP);
2928                arp->ar_op = htons(1);
2929
2930                memcpy(arp->ar_tip, vptr->ip_addr, 4);
2931
2932                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2933                                (u8 *)&mask_pattern[0][0]);
2934
2935                writew(crc, &regs->PatternCRC[0]);
2936                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2937        }
2938
2939        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2940        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2941
2942        writew(0x0FFF, &regs->WOLSRClr);
2943
2944        if (spd_dpx == SPD_DPX_1000_FULL)
2945                goto mac_done;
2946
2947        if (spd_dpx != SPD_DPX_AUTO)
2948                goto advertise_done;
2949
2950        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2951                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2952                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2953
2954                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2955        }
2956
2957        if (vptr->mii_status & VELOCITY_SPEED_1000)
2958                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2959
2960advertise_done:
2961        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2962
2963        {
2964                u8 GCR;
2965                GCR = readb(&regs->CHIPGCR);
2966                GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2967                writeb(GCR, &regs->CHIPGCR);
2968        }
2969
2970mac_done:
2971        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2972        /* Turn on SWPTAG just before entering power mode */
2973        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2974        /* Go to bed ..... */
2975        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2976
2977        return 0;
2978}
2979
2980/**
2981 *      velocity_save_context   -       save registers
2982 *      @vptr: velocity
2983 *      @context: buffer for stored context
2984 *
2985 *      Retrieve the current configuration from the velocity hardware
2986 *      and stash it in the context structure, for use by the context
2987 *      restore functions. This allows us to save things we need across
2988 *      power down states
2989 */
2990static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
2991{
2992        struct mac_regs __iomem *regs = vptr->mac_regs;
2993        u16 i;
2994        u8 __iomem *ptr = (u8 __iomem *)regs;
2995
2996        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
2997                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2998
2999        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3000                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3001
3002        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3003                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3004
3005}
3006
3007static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3008{
3009        struct net_device *dev = pci_get_drvdata(pdev);
3010        struct velocity_info *vptr = netdev_priv(dev);
3011        unsigned long flags;
3012
3013        if (!netif_running(vptr->dev))
3014                return 0;
3015
3016        netif_device_detach(vptr->dev);
3017
3018        spin_lock_irqsave(&vptr->lock, flags);
3019        pci_save_state(pdev);
3020
3021        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3022                velocity_get_ip(vptr);
3023                velocity_save_context(vptr, &vptr->context);
3024                velocity_shutdown(vptr);
3025                velocity_set_wol(vptr);
3026                pci_enable_wake(pdev, PCI_D3hot, 1);
3027                pci_set_power_state(pdev, PCI_D3hot);
3028        } else {
3029                velocity_save_context(vptr, &vptr->context);
3030                velocity_shutdown(vptr);
3031                pci_disable_device(pdev);
3032                pci_set_power_state(pdev, pci_choose_state(pdev, state));
3033        }
3034
3035        spin_unlock_irqrestore(&vptr->lock, flags);
3036        return 0;
3037}
3038
3039/**
3040 *      velocity_restore_context        -       restore registers
3041 *      @vptr: velocity
3042 *      @context: buffer containing the stored context
3043 *
3044 *      Reload the register configuration from the velocity context
3045 *      created by velocity_save_context.
3046 */
3047static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3048{
3049        struct mac_regs __iomem *regs = vptr->mac_regs;
3050        int i;
3051        u8 __iomem *ptr = (u8 __iomem *)regs;
3052
3053        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3054                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3055
3056        /* CR1-CR3 have paired set/clear registers four bytes apart; CR0 is skipped */
3057        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3058                /* Clear the bits that were zero in the saved value... */
3059                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3060                /* ...then set the bits that were one */
3061                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3062        }
3063
3064        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3065                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3066
3067        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3068                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3069
3070        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3071                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3072}
3073
3074static int velocity_resume(struct pci_dev *pdev)
3075{
3076        struct net_device *dev = pci_get_drvdata(pdev);
3077        struct velocity_info *vptr = netdev_priv(dev);
3078        unsigned long flags;
3079        int i;
3080
3081        if (!netif_running(vptr->dev))
3082                return 0;
3083
3084        pci_set_power_state(pdev, PCI_D0);
3085        pci_enable_wake(pdev, 0, 0);
3086        pci_restore_state(pdev);
3087
3088        mac_wol_reset(vptr->mac_regs);
3089
3090        spin_lock_irqsave(&vptr->lock, flags);
3091        velocity_restore_context(vptr, &vptr->context);
3092        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3093        mac_disable_int(vptr->mac_regs);
3094
3095        velocity_tx_srv(vptr);
3096
3097        for (i = 0; i < vptr->tx.numq; i++) {
3098                if (vptr->tx.used[i])
3099                        mac_tx_queue_wake(vptr->mac_regs, i);
3100        }
3101
3102        mac_enable_int(vptr->mac_regs);
3103        spin_unlock_irqrestore(&vptr->lock, flags);
3104        netif_device_attach(vptr->dev);
3105
3106        return 0;
3107}
3108#endif
3109
3110/*
3111 *      Definition for our device driver. The PCI layer interface
3112 *      uses this to handle all our card discovery and hot plugging.
3113 */
3114static struct pci_driver velocity_driver = {
3115        .name           = VELOCITY_NAME,
3116        .id_table       = velocity_id_table,
3117        .probe          = velocity_found1,
3118        .remove         = velocity_remove1,
3119#ifdef CONFIG_PM
3120        .suspend        = velocity_suspend,
3121        .resume         = velocity_resume,
3122#endif
3123};
3124
3126/**
3127 *      velocity_ethtool_up     -       pre hook for ethtool
3128 *      @dev: network device
3129 *
3130 *      Called before an ethtool operation. We need to make sure the
3131 *      chip is out of D3 state before we poke at it.
3132 */
3133static int velocity_ethtool_up(struct net_device *dev)
3134{
3135        struct velocity_info *vptr = netdev_priv(dev);
3136        if (!netif_running(dev))
3137                pci_set_power_state(vptr->pdev, PCI_D0);
3138        return 0;
3139}
3140
3141/**
3142 *      velocity_ethtool_down   -       post hook for ethtool
3143 *      @dev: network device
3144 *
3145 *      Called after an ethtool operation. Put the chip back into D3
3146 *      state if the interface isn't running.
3147 */
3148static void velocity_ethtool_down(struct net_device *dev)
3149{
3150        struct velocity_info *vptr = netdev_priv(dev);
3151        if (!netif_running(dev))
3152                pci_set_power_state(vptr->pdev, PCI_D3hot);
3153}
3154
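/*
 * ethtool link settings. velocity_get_settings() reports the
 * supported and advertised modes together with the speed, duplex and
 * autoneg state read back from the hardware; velocity_set_settings()
 * validates a requested mode and reprograms the link to match.
 */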
3155static int velocity_get_settings(struct net_device *dev,
3156                                 struct ethtool_cmd *cmd)
3157{
3158        struct velocity_info *vptr = netdev_priv(dev);
3159        struct mac_regs __iomem *regs = vptr->mac_regs;
3160        u32 status = check_connection_type(vptr->mac_regs);
3162
3163        cmd->supported = SUPPORTED_TP |
3164                        SUPPORTED_Autoneg |
3165                        SUPPORTED_10baseT_Half |
3166                        SUPPORTED_10baseT_Full |
3167                        SUPPORTED_100baseT_Half |
3168                        SUPPORTED_100baseT_Full |
3169                        SUPPORTED_1000baseT_Half |
3170                        SUPPORTED_1000baseT_Full;
3171
3172        cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3173        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3174                cmd->advertising |=
3175                        ADVERTISED_10baseT_Half |
3176                        ADVERTISED_10baseT_Full |
3177                        ADVERTISED_100baseT_Half |
3178                        ADVERTISED_100baseT_Full |
3179                        ADVERTISED_1000baseT_Half |
3180                        ADVERTISED_1000baseT_Full;
3181        } else {
3182                switch (vptr->options.spd_dpx) {
3183                case SPD_DPX_1000_FULL:
3184                        cmd->advertising |= ADVERTISED_1000baseT_Full;
3185                        break;
3186                case SPD_DPX_100_HALF:
3187                        cmd->advertising |= ADVERTISED_100baseT_Half;
3188                        break;
3189                case SPD_DPX_100_FULL:
3190                        cmd->advertising |= ADVERTISED_100baseT_Full;
3191                        break;
3192                case SPD_DPX_10_HALF:
3193                        cmd->advertising |= ADVERTISED_10baseT_Half;
3194                        break;
3195                case SPD_DPX_10_FULL:
3196                        cmd->advertising |= ADVERTISED_10baseT_Full;
3197                        break;
3198                default:
3199                        break;
3200                }
3201        }
3202
3203        if (status & VELOCITY_SPEED_1000)
3204                ethtool_cmd_speed_set(cmd, SPEED_1000);
3205        else if (status & VELOCITY_SPEED_100)
3206                ethtool_cmd_speed_set(cmd, SPEED_100);
3207        else
3208                ethtool_cmd_speed_set(cmd, SPEED_10);
3209
3210        cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3211        cmd->port = PORT_TP;
3212        cmd->transceiver = XCVR_INTERNAL;
3213        cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3214
3215        if (status & VELOCITY_DUPLEX_FULL)
3216                cmd->duplex = DUPLEX_FULL;
3217        else
3218                cmd->duplex = DUPLEX_HALF;
3219
3220        return 0;
3221}
3222
3223static int velocity_set_settings(struct net_device *dev,
3224                                 struct ethtool_cmd *cmd)
3225{
3226        struct velocity_info *vptr = netdev_priv(dev);
3227        u32 speed = ethtool_cmd_speed(cmd);
3228        u32 curr_status;
3229        u32 new_status = 0;
3230        int ret = 0;
3231
3232        curr_status = check_connection_type(vptr->mac_regs);
3233        curr_status &= (~VELOCITY_LINK_FAIL);
3234
3235        new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3236        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3237        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3238        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3239        new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3240
3241        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3242            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3243                ret = -EINVAL;
3244        } else {
3245                enum speed_opt spd_dpx;
3246
3247                if (new_status & VELOCITY_AUTONEG_ENABLE)
3248                        spd_dpx = SPD_DPX_AUTO;
3249                else if ((new_status & VELOCITY_SPEED_1000) &&
3250                         (new_status & VELOCITY_DUPLEX_FULL)) {
3251                        spd_dpx = SPD_DPX_1000_FULL;
3252                } else if (new_status & VELOCITY_SPEED_100)
3253                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3254                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3255                else if (new_status & VELOCITY_SPEED_10)
3256                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3257                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3258                else
3259                        return -EOPNOTSUPP;
3260
3261                vptr->options.spd_dpx = spd_dpx;
3262
3263                velocity_set_media_mode(vptr, new_status);
3264        }
3265
3266        return ret;
3267}
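/*
 * Example (illustrative): "ethtool -s eth0 autoneg off speed 100
 * duplex full" reaches velocity_set_settings() above as SPEED_100
 * and DUPLEX_FULL with autoneg disabled, and is folded into
 * SPD_DPX_100_FULL before velocity_set_media_mode() reprograms the
 * link.
 */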
3268
3269static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3270{
3271        struct velocity_info *vptr = netdev_priv(dev);
3272        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3273        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3274        strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
3275}
3276
3277static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3278{
3279        struct velocity_info *vptr = netdev_priv(dev);
3280        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3281        wol->wolopts |= WAKE_MAGIC;
3282        /* PHY based wake is not currently implemented:
3283         *      if (vptr->wol_opts & VELOCITY_WOL_PHY)
3284         *              wol->wolopts |= WAKE_PHY;
3285         */
3286        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3287                wol->wolopts |= WAKE_UCAST;
3288        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3289                wol->wolopts |= WAKE_ARP;
3290        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3291}
3292
3293static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3294{
3295        struct velocity_info *vptr = netdev_priv(dev);
3296
3297        if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3298                return -EINVAL;
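        /* Magic packet wake is always kept in the programmed options */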
3299        vptr->wol_opts = VELOCITY_WOL_MAGIC;
3300
3301        /* PHY based wake is not currently implemented:
3302         *      if (wol->wolopts & WAKE_PHY) {
3303         *              vptr->wol_opts |= VELOCITY_WOL_PHY;
3304         *              vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3305         *      }
3306         */
3307
3308        if (wol->wolopts & WAKE_MAGIC) {
3309                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3310                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3311        }
3312        if (wol->wolopts & WAKE_UCAST) {
3313                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3314                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3315        }
3316        if (wol->wolopts & WAKE_ARP) {
3317                vptr->wol_opts |= VELOCITY_WOL_ARP;
3318                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3319        }
3320        memcpy(vptr->wol_passwd, wol->sopass, 6);
3321        return 0;
3322}
3323
3324static u32 velocity_get_msglevel(struct net_device *dev)
3325{
3326        return msglevel;
3327}
3328
3329static void velocity_set_msglevel(struct net_device *dev, u32 value)
3330{
3331        msglevel = value;
3332}
3333
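/*
 * The rx/tx queue pending timers are encoded as a 6 bit count in
 * bits 5:0 with a x1, x4, x16 or x64 multiplier selected by bits
 * 7:6. Worked example (illustrative): a register value of 0x83 has
 * multiplier bits 0x83 >> 6 == 2 (x16) and count 0x83 & 0x3f == 3,
 * so it decodes to 3 * 16 = 48 microseconds.
 */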
3334static int get_pending_timer_val(int val)
3335{
3336        int mult_bits = val >> 6;
3337        int mult = 1;
3338
3339        switch (mult_bits) {
3341        case 1:
3342                mult = 4; break;
3343        case 2:
3344                mult = 16; break;
3345        case 3:
3346                mult = 64; break;
3347        case 0:
3348        default:
3349                break;
3350        }
3351
3352        return (val & 0x3f) * mult;
3353}
3354
3355static void set_pending_timer_val(int *val, u32 us)
3356{
3357        u8 mult = 0;
3358        u8 shift = 0;
3359
3360        if (us >= 0x3f) {
3361                mult = 1; /* x4 multiplier */
3362                shift = 2;
3363        }
3364        if (us >= 0x3f * 4) {
3365                mult = 2; /* x16 multiplier */
3366                shift = 4;
3367        }
3368        if (us >= 0x3f * 16) {
3369                mult = 3; /* x64 multiplier */
3370                shift = 6;
3371        }
3372
3373        *val = (mult << 6) | ((us >> shift) & 0x3f);
3374}
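/*
 * Worked example (illustrative): set_pending_timer_val(&val, 100)
 * selects the x4 multiplier (100 >= 0x3f but below 0x3f * 4), so it
 * stores (1 << 6) | (100 >> 2) == 0x59, which decodes back to
 * 25 * 4 = 100 microseconds. Requests are rounded down to the
 * granularity of the chosen multiplier.
 */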
3375
3377static int velocity_get_coalesce(struct net_device *dev,
3378                struct ethtool_coalesce *ecmd)
3379{
3380        struct velocity_info *vptr = netdev_priv(dev);
3381
3382        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3383        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3384
3385        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3386        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3387
3388        return 0;
3389}
3390
3391static int velocity_set_coalesce(struct net_device *dev,
3392                struct ethtool_coalesce *ecmd)
3393{
3394        struct velocity_info *vptr = netdev_priv(dev);
3395        int max_us = 0x3f * 64;
3396        unsigned long flags;
3397
3398        /* The timers are a 6 bit count with at most a x64 multiplier */
3399        if (ecmd->tx_coalesce_usecs > max_us)
3400                return -EINVAL;
3401        if (ecmd->rx_coalesce_usecs > max_us)
3402                return -EINVAL;
3403
3404        if (ecmd->tx_max_coalesced_frames > 0xff)
3405                return -EINVAL;
3406        if (ecmd->rx_max_coalesced_frames > 0xff)
3407                return -EINVAL;
3408
3409        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3410        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3411
3412        set_pending_timer_val(&vptr->options.rxqueue_timer,
3413                        ecmd->rx_coalesce_usecs);
3414        set_pending_timer_val(&vptr->options.txqueue_timer,
3415                        ecmd->tx_coalesce_usecs);
3416
3417        /* Setup the interrupt suppression and queue timers */
3418        spin_lock_irqsave(&vptr->lock, flags);
3419        mac_disable_int(vptr->mac_regs);
3420        setup_adaptive_interrupts(vptr);
3421        setup_queue_timers(vptr);
3422
3423        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3424        mac_clear_isr(vptr->mac_regs);
3425        mac_enable_int(vptr->mac_regs);
3426        spin_unlock_irqrestore(&vptr->lock, flags);
3427
3428        return 0;
3429}
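/*
 * These hooks back the standard interrupt coalescing interface, for
 * example (illustrative): "ethtool -C eth0 rx-frames 16 rx-usecs 48"
 * sets the rx interrupt suppression count to 16 frames and the rx
 * queue pending timer to 48 microseconds.
 */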
3430
3431static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3432        "rx_all",
3433        "rx_ok",
3434        "tx_ok",
3435        "rx_error",
3436        "rx_runt_ok",
3437        "rx_runt_err",
3438        "rx_64",
3439        "tx_64",
3440        "rx_65_to_127",
3441        "tx_65_to_127",
3442        "rx_128_to_255",
3443        "tx_128_to_255",
3444        "rx_256_to_511",
3445        "tx_256_to_511",
3446        "rx_512_to_1023",
3447        "tx_512_to_1023",
3448        "rx_1024_to_1518",
3449        "tx_1024_to_1518",
3450        "tx_ether_collisions",
3451        "rx_crc_errors",
3452        "rx_jumbo",
3453        "tx_jumbo",
3454        "rx_mac_control_frames",
3455        "tx_mac_control_frames",
3456        "rx_frame_alignment_errors",
3457        "rx_long_ok",
3458        "rx_long_err",
3459        "tx_sqe_errors",
3460        "rx_no_buf",
3461        "rx_symbol_errors",
3462        "in_range_length_errors",
3463        "late_collisions"
3464};
3465
3466static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3467{
3468        switch (sset) {
3469        case ETH_SS_STATS:
3470                memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3471                break;
3472        }
3473}
3474
3475static int velocity_get_sset_count(struct net_device *dev, int sset)
3476{
3477        switch (sset) {
3478        case ETH_SS_STATS:
3479                return ARRAY_SIZE(velocity_gstrings);
3480        default:
3481                return -EOPNOTSUPP;
3482        }
3483}
3484
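/*
 * The MIB counters are maintained by the hardware, so the statistics
 * are only refreshed while the interface is running; with the device
 * down the output buffer is left untouched.
 */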
3485static void velocity_get_ethtool_stats(struct net_device *dev,
3486                                       struct ethtool_stats *stats, u64 *data)
3487{
3488        if (netif_running(dev)) {
3489                struct velocity_info *vptr = netdev_priv(dev);
3490                u32 *p = vptr->mib_counter;
3491                int i;
3492
3493                spin_lock_irq(&vptr->lock);
3494                velocity_update_hw_mibs(vptr);
3495                spin_unlock_irq(&vptr->lock);
3496
3497                for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3498                        *data++ = *p++;
3499        }
3500}
3501
3502static const struct ethtool_ops velocity_ethtool_ops = {
3503        .get_settings           = velocity_get_settings,
3504        .set_settings           = velocity_set_settings,
3505        .get_drvinfo            = velocity_get_drvinfo,
3506        .get_wol                = velocity_ethtool_get_wol,
3507        .set_wol                = velocity_ethtool_set_wol,
3508        .get_msglevel           = velocity_get_msglevel,
3509        .set_msglevel           = velocity_set_msglevel,
3510        .get_link               = velocity_get_link,
3511        .get_strings            = velocity_get_strings,
3512        .get_sset_count         = velocity_get_sset_count,
3513        .get_ethtool_stats      = velocity_get_ethtool_stats,
3514        .get_coalesce           = velocity_get_coalesce,
3515        .set_coalesce           = velocity_set_coalesce,
3516        .begin                  = velocity_ethtool_up,
3517        .complete               = velocity_ethtool_down
3518};
3519
3520#if defined(CONFIG_PM) && defined(CONFIG_INET)
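/*
 * ARP wake-up needs the interface's current IPv4 address, so listen
 * for inetaddr changes on our devices and refresh the cached copy
 * that the WOL setup code uses.
 */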
3521static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3522{
3523        struct in_ifaddr *ifa = ptr;
3524        struct net_device *dev = ifa->ifa_dev->dev;
3525
3526        if (dev_net(dev) == &init_net &&
3527            dev->netdev_ops == &velocity_netdev_ops)
3528                velocity_get_ip(netdev_priv(dev));
3529
3530        return NOTIFY_DONE;
3531}
3532
3533static struct notifier_block velocity_inetaddr_notifier = {
3534        .notifier_call  = velocity_netdev_event,
3535};
3536
3537static void velocity_register_notifier(void)
3538{
3539        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3540}
3541
3542static void velocity_unregister_notifier(void)
3543{
3544        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3545}
3546
3547#else
3548
3549#define velocity_register_notifier()    do {} while (0)
3550#define velocity_unregister_notifier()  do {} while (0)
3551
3552#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3553
3554/**
3555 *      velocity_init_module    -       load time function
3556 *
3557 *      Called when the velocity module is loaded. The PCI driver
3558 *      is registered with the PCI layer, which in turn will call
3559 *      the probe functions for each velocity adapter installed
3560 *      in the system.
3561 */
3562static int __init velocity_init_module(void)
3563{
3564        int ret;
3565
3566        velocity_register_notifier();
3567        ret = pci_register_driver(&velocity_driver);
3568        if (ret < 0)
3569                velocity_unregister_notifier();
3570        return ret;
3571}
3572
3573/**
3574 *      velocity_cleanup_module -       module unload
3575 *
3576 *      Called when the velocity module is unloaded. It cleans up
3577 *      the notifiers and unregisters the PCI driver interface for
3578 *      this hardware, which in turn cleans up all discovered
3579 *      interfaces before the function returns.
3580 */
3581static void __exit velocity_cleanup_module(void)
3582{
3583        velocity_unregister_notifier();
3584        pci_unregister_driver(&velocity_driver);
3585}
3586
3587module_init(velocity_init_module);
3588module_exit(velocity_cleanup_module);
3589