linux/drivers/net/via-velocity.c
/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *      rx_copybreak/alignment
 *      More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list, not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

/**
 *      mac_get_cam_mask        -       Read a CAM mask
 *      @regs: register block for this velocity
 *      @mask: buffer to store mask
 *
 *      Fetch the mask bits of the selected CAM and store them into the
 *      provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *      mac_set_cam_mask        -       Set a CAM mask
 *      @regs: register block for this velocity
 *      @mask: CAM mask to load
 *
 *      Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam     -       set CAM data
 *      @regs: register block of this velocity
 *      @idx: CAM index
 *      @addr: 2 or 6 bytes of CAM data
 *
 *      Load an address or VLAN tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *      mac_wol_reset   -       reset WOL after exiting low power
 *      @regs: register block of this velocity
 *
 *      Called after we drop out of Wake-on-LAN mode in order to reset
 *      the Wake-on-LAN features. This function doesn't restore the
 *      rest of the logic after a sleep/wakeup cycle.
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);

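/*
 * For reference, a VELOCITY_PARAM(RxDescriptors, "Number of receive
 * descriptors") invocation expands to roughly:
 *
 *      static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      module_param_array(RxDescriptors, int, NULL, 0);
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * i.e. one array slot per adapter, filled from a comma separated list
 * at module load time; unset slots keep the OPTION_DEFAULT sentinel
 * (-1), which velocity_set_int_opt() below treats as "use the
 * per-option default".
 */
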
#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: the RX FIFO threshold is 128 bytes.
   1: the RX FIFO threshold is 512 bytes.
   2: the RX FIFO threshold is 1024 bytes.
   3: store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: the IP header is not DWORD aligned. (Default)
   1: the IP header is DWORD aligned.
      In some environments the IP header must be DWORD aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header DWORD alignment");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of the NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable.
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
   0: autonegotiation for both speed and duplex mode
   1: 100Mbps half duplex mode
   2: 100Mbps full duplex mode
   3: 10Mbps half duplex mode
   4: 10Mbps full duplex mode
   5: 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to force mode, the driver ignores
   this option.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF     0
/* ValPktLen[] is used for setting the checksum offload ability of the NIC.
   0: Receive frames with an invalid layer 2 length (Default)
   1: Drop frames with an invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling Wake-on-LAN behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if the link status changes.
   2: Wake up if an ARP packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option,
   e.g. 6 wakes on both ARP and unicast packets.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");

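/*
 * Example module usage (hypothetical values): force 100Mbps full duplex
 * on the first NIC and leave the second autonegotiating, keeping the
 * hardware default flow control on both:
 *
 *      modprobe via-velocity speed_duplex=2,0 flow_control=1,1
 *
 * module_param_array() parses the comma separated list, one entry per
 * detected adapter, in probe order.
 */
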
/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */
static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 *      get_chip_name   -       identifier to name
 *      @chip_id: chip identifier
 *
 *      Given a chip identifier return a suitable description. Returns
 *      a pointer to a static string valid while the driver is loaded.
 */
static const char __devinit *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 *      velocity_remove1        -       device unplug
 *      @pdev: PCI device being removed
 *
 *      Device unload callback. Called on an unplug or on module
 *      unload for each active device that is present. Disconnects
 *      the device from the network layer and frees all the resources.
 */
static void __devexit velocity_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct velocity_info *vptr = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(vptr->mac_regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        velocity_nics--;
}

/**
 *      velocity_set_int_opt    -       parser for integer options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @min: lowest value allowed
 *      @max: highest value allowed
 *      @def: default value
 *      @name: property name
 *      @devname: device name
 *
 *      Set an integer property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                                        devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                                        devname, name, val);
                *opt = val;
        }
}

/**
 *      velocity_set_bool_opt   -       parser for boolean options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @def: default value (yes/no)
 *      @flag: numeric value to set for true.
 *      @name: property name
 *      @devname: device name
 *
 *      Set a boolean property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 *      velocity_get_options    -       set options on device
 *      @opts: option structure for the device
 *      @index: index of option to use in module options array
 *      @devname: device name
 *
 *      Turn the module and command options into a single structure
 *      for the current device.
 */
static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
        opts->numrx = (opts->numrx & ~3);
}

/**
 *      velocity_init_cam_filter        -       initialise CAM
 *      @vptr: velocity to program
 *
 *      Initialize the content addressable memory used for filters. Load
 *      appropriately according to the presence of VLAN.
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        if (vptr->vlgrp) {
                unsigned int vid, i = 0;

                if (!vlan_group_get_device(vptr->vlgrp, 0))
                        WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

                for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
                        if (vlan_group_get_device(vptr->vlgrp, vid)) {
                                mac_set_vlan_cam(regs, i, (u8 *) &vid);
                                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                                if (++i >= VCAM_SIZE)
                                        break;
                        }
                }
                mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        }
}

static void velocity_vlan_rx_register(struct net_device *dev,
                                      struct vlan_group *grp)
{
        struct velocity_info *vptr = netdev_priv(dev);

        vptr->vlgrp = grp;
}

static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        vlan_group_set_device(vptr->vlgrp, vid, NULL);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *      velocity_rx_reset       -       handle a receive reset
 *      @vptr: velocity we are resetting
 *
 *      Reset the ownership and status for the receive ring side.
 *      Hand the whole receive queue back to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         *      Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *      velocity_get_opt_media_mode     -       get media selection
 *      @vptr: velocity adapter
 *
 *      Get the media mode stored in EEPROM or module options and load
 *      mii_status accordingly. The requested link state information
 *      is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        case SPD_DPX_1000_FULL:
                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 *      safe_disable_mii_autopoll       -       autopoll off
 *      @regs: velocity registers
 *
 *      Turn off the autopoll and wait for it to disable on the chip.
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /*  turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      enable_mii_autopoll     -       turn on autopolling
 *      @regs: velocity registers
 *
 *      Enable the MII link status autopoll feature on the Velocity
 *      hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

}

/**
 *      velocity_mii_read       -       read MII data
 *      @regs: velocity registers
 *      @index: MII register index
 *      @data: buffer for received data
 *
 *      Perform a single read of an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}


/**
 *      mii_check_media_mode    -       check media state
 *      @regs: velocity registers
 *
 *      Check the current MII status and determine the link status
 *      accordingly.
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_mii_write      -       write MII data
 *      @regs: velocity registers
 *      @mii_addr: MII register index
 *      @data: 16bit data for the MII register
 *
 *      Perform a single write to an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

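/*
 * A typical read-modify-write cycle using the two helpers above, as
 * done elsewhere in this driver (sketch only; the ANAR bit chosen is
 * illustrative):
 *
 *      u16 ANAR;
 *
 *      velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
 *      ANAR |= ADVERTISE_100FULL;
 *      velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
 *
 * Both helpers disable MII autopolling around the access and re-enable
 * it afterwards, so the register address in MIIADR can be set safely.
 */
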
/**
 *      set_mii_flow_control    -       flow control setup
 *      @vptr: velocity interface
 *
 *      Set up the flow control on this interface according to
 *      the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
        }
}

/**
 *      mii_set_auto_on         -       autonegotiate on
 *      @vptr: velocity
 *
 *      Enable autonegotiation on this interface.
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;
        PHYSR0 = readb(&regs->PHYSR0);

        /*
           if (!(PHYSR0 & PHYSR0_LINKGD))
           status|=VELOCITY_LINK_FAIL;
         */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}



/**
 *      velocity_set_media_mode         -       set media mode
 *      @vptr: velocity adapter to configure
 *      @mii_status: old MII link state
 *
 *      Check the media link state and configure the flow control on the
 *      PHY and the velocity hardware setup accordingly. In particular
 *      we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        u32 curr_status;
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        /*
           Check if new status is consistent with current status
           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
               (mii_status==curr_status)) {
           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
           vptr->mii_status=check_connection_type(vptr->mac_regs);
           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
           return 0;
           }
         */

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

        /*
         *      If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in half duplex mode
                 *    and enable it in full duplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);

                if (mii_status & VELOCITY_SPEED_1000)
                        CHIPGCR |= CHIPGCR_FCGMII;
                else
                        CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
                if ((mii_status & VELOCITY_SPEED_1000) &&
                    (mii_status & VELOCITY_DUPLEX_FULL)) {
                        CTRL1000 |= ADVERTISE_1000FULL;
                }
                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
                } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
                                ANAR |= ADVERTISE_10HALF;
                }
                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 *      velocity_print_link_status      -       link status reporting
 *      @vptr: velocity to report on
 *
 *      Turn the link status of the velocity card into a kernel log
 *      description of the new link state, detailing speed and duplex
 *      status.
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
                        break;
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                default:
                        break;
                }
        }
}

/**
 *      enable_flow_control_ability     -       flow control
 *      @vptr: velocity to configure
 *
 *      Set up flow control according to the flow control options
 *      determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {

        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }

}

/**
 *      velocity_soft_reset     -       soft reset
 *      @vptr: velocity to reset
 *
 *      Kick off a soft reset of the velocity adapter and then poll
 *      until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 *      velocity_set_multi      -       filter list change callback
 *      @dev: network device
 *
 *      Called by the network layer when the filter lists need to change
 *      for a velocity adapter. Reload the CAMs with the new address
 *      filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask);

                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

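/*
 * For illustration (hypothetical values): with an MCAM_SIZE of 64 and a
 * multicast_limit of 32, offset is 32, so the first multicast address
 * lands in CAM entry 32 and its enable bit is set by
 *
 *      mCAMmask[32 / 8] |= 1 << (32 & 7);      i.e. bit 0 of byte 4
 *
 * which mac_set_cam_mask() then loads back into the hardware so that
 * only the populated CAM entries take part in filtering.
 */
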
/*
 * MII access and media link mode setting functions
 */

/**
 *      mii_init        -       set up MII
 *      @vptr: velocity adapter
 *      @mii_status: link status
 *
 *      Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn
                 *      it off in NWay-forced half mode to work around the
                 *      NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn
                 *      it off in NWay-forced half mode to work around the
                 *      NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *      Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
        if (BMCR & BMCR_ISOLATE) {
                BMCR &= ~BMCR_ISOLATE;
                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers   -       Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}

/**
 *      velocity_init_registers -       initialise MAC registers
 *      @vptr: velocity to init
 *      @type: type of initialisation (hot or cold)
 *
 *      Initialise the MAC on a reset or on first set up on the
 *      hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(vptr->dev);

                /*
                 *      Reset RX to prevent the RX pointer from ending up
                 *      off its required 4X alignment
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                                                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *      Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                mac_eeprom_reload(regs);
                for (i = 0; i < 6; i++)
                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));

                /*
                 *      clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *      Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *      Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(vptr->dev);

                /*
                 *      Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(vptr->dev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * The number of RDs handed back must be a multiple of 4
         * per the hardware spec (programming guide rev 1.20, p.13)
         */
        if (vptr->rx.filled < 4)
                return;

        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}

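/*
 * A worked example of the hand-back arithmetic above (values are
 * illustrative): with rx.filled == 11, unusable = 11 & 3 = 3, so only
 * 11 & ~3 = 8 descriptors are flipped to OWNED_BY_NIC and reported in
 * RBRDU; the 3 leftover descriptors stay counted in rx.filled until a
 * later refill pushes the total past the next multiple of 4.
 */
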
1461/**
1462 *      velocity_init_dma_rings -       set up DMA rings
1463 *      @vptr: Velocity to set up
1464 *
1465 *      Allocate PCI mapped DMA rings for the receive and transmit layer
1466 *      to use.
1467 */
1468static int velocity_init_dma_rings(struct velocity_info *vptr)
1469{
1470        struct velocity_opt *opt = &vptr->options;
1471        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1472        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1473        struct pci_dev *pdev = vptr->pdev;
1474        dma_addr_t pool_dma;
1475        void *pool;
1476        unsigned int i;
1477
1478        /*
1479         * Allocate all RD/TD rings in a single pool.
1480         *
1481         * pci_alloc_consistent() fulfills the requirement for 64 byte
1482         * alignment.
1483         */
1484        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1485                                    rx_ring_size, &pool_dma);
1486        if (!pool) {
1487                dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
1488                        vptr->dev->name);
1489                return -ENOMEM;
1490        }
1491
1492        vptr->rx.ring = pool;
1493        vptr->rx.pool_dma = pool_dma;
1494
1495        pool += rx_ring_size;
1496        pool_dma += rx_ring_size;
1497
1498        for (i = 0; i < vptr->tx.numq; i++) {
1499                vptr->tx.rings[i] = pool;
1500                vptr->tx.pool_dma[i] = pool_dma;
1501                pool += tx_ring_size;
1502                pool_dma += tx_ring_size;
1503        }
1504
1505        return 0;
1506}
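/*
 * Resulting pool layout (a sketch; the number of tx queues varies by
 * adapter):
 *
 *   pool_dma -> +-----------------------+
 *               | numrx * rx_desc       |  rx.ring / rx.pool_dma
 *               +-----------------------+
 *               | numtx * tx_desc       |  tx.rings[0] / tx.pool_dma[0]
 *               +-----------------------+
 *               | ... one per tx queue  |
 *               +-----------------------+
 *
 * Carving the rings out of one consistent allocation keeps the
 * mapping overhead to a single pci_alloc_consistent() call.
 */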
1507
1508static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1509{
1510        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1511}
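/*
 * For a standard MTU (up to ETH_DATA_LEN) the receive buffer is the
 * fixed PKT_BUF_SZ; for jumbo MTUs it is mtu + 32, which by the
 * arithmetic here leaves slack for the Ethernet header, a VLAN tag
 * and the FCS (an inference from the numbers, not from a datasheet).
 */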
1512
1513/**
1514 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1515 *      @vptr: velocity
1516 *      @idx: ring index
1517 *
1518 *      Allocate a new full sized buffer for the reception of a frame and
1519 *      map it into PCI space for the hardware to use. The hardware
1520 *      requires *64* byte alignment of the buffer which makes life
1521 *      less fun than would be ideal.
1522 */
1523static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1524{
1525        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1526        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1527
1528        rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1529        if (rd_info->skb == NULL)
1530                return -ENOMEM;
1531
1532        /*
1533         *      Do the gymnastics to get the buffer head for data at
1534         *      64 byte alignment.
1535         */
1536        skb_reserve(rd_info->skb,
1537                        64 - ((unsigned long) rd_info->skb->data & 63));
1538        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1539                                        vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1540
1541        /*
1542         *      Fill in the descriptor to match
1543         */
1544
1545        *((u32 *)&(rd->rdesc0)) = 0;
1546        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1547        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1548        rd->pa_high = 0;
1549        return 0;
1550}
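/*
 * A concrete instance of the alignment fix-up above: if the skb data
 * pointer comes back as ...0x1a2c, then 0x1a2c & 63 = 44 and
 * skb_reserve() advances it by 64 - 44 = 20 bytes to ...0x1a40, the
 * next 64 byte boundary.  An already aligned pointer is advanced by a
 * full 64 bytes; the extra 64 bytes requested from dev_alloc_skb()
 * guarantee the reserve always fits.
 */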
1551
1552
1553static int velocity_rx_refill(struct velocity_info *vptr)
1554{
1555        int dirty = vptr->rx.dirty, done = 0;
1556
1557        do {
1558                struct rx_desc *rd = vptr->rx.ring + dirty;
1559
1560                /* Fine for an all zero Rx desc at init time as well */
1561                if (rd->rdesc0.len & OWNED_BY_NIC)
1562                        break;
1563
1564                if (!vptr->rx.info[dirty].skb) {
1565                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1566                                break;
1567                }
1568                done++;
1569                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1570        } while (dirty != vptr->rx.curr);
1571
1572        if (done) {
1573                vptr->rx.dirty = dirty;
1574                vptr->rx.filled += done;
1575        }
1576
1577        return done;
1578}
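/*
 * Note that refill walks from rx.dirty towards rx.curr and stops
 * early if a descriptor is still owned by the NIC or an skb
 * allocation fails; the shortfall is visible to the caller through
 * the return value and the rx.filled count.
 */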
1579
1580/**
1581 *      velocity_free_rd_ring   -       free receive ring
1582 *      @vptr: velocity to clean up
1583 *
1584 *      Free the receive buffers for each ring slot and any
1585 *      attached socket buffers that need to go away.
1586 */
1587static void velocity_free_rd_ring(struct velocity_info *vptr)
1588{
1589        int i;
1590
1591        if (vptr->rx.info == NULL)
1592                return;
1593
1594        for (i = 0; i < vptr->options.numrx; i++) {
1595                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1596                struct rx_desc *rd = vptr->rx.ring + i;
1597
1598                memset(rd, 0, sizeof(*rd));
1599
1600                if (!rd_info->skb)
1601                        continue;
1602                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1603                                 PCI_DMA_FROMDEVICE);
1604                rd_info->skb_dma = 0;
1605
1606                dev_kfree_skb(rd_info->skb);
1607                rd_info->skb = NULL;
1608        }
1609
1610        kfree(vptr->rx.info);
1611        vptr->rx.info = NULL;
1612}
1613
1614
1615
1616/**
1617 *      velocity_init_rd_ring   -       set up receive ring
1618 *      @vptr: velocity to configure
1619 *
1620 *      Allocate and set up the receive buffers for each ring slot and
1621 *      assign them to the network adapter.
1622 */
1623static int velocity_init_rd_ring(struct velocity_info *vptr)
1624{
1625        int ret = -ENOMEM;
1626
1627        vptr->rx.info = kcalloc(vptr->options.numrx,
1628                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1629        if (!vptr->rx.info)
1630                goto out;
1631
1632        velocity_init_rx_ring_indexes(vptr);
1633
1634        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1635                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1636                        "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1637                velocity_free_rd_ring(vptr);
1638                goto out;
1639        }
1640
1641        ret = 0;
1642out:
1643        return ret;
1644}
1645
1646/**
1647 *      velocity_init_td_ring   -       set up transmit ring
1648 *      @vptr:  velocity
1649 *
1650 *      Set up the transmit ring and chain the ring pointers together.
1651 *      Returns zero on success or a negative posix errno code for
1652 *      failure.
1653 */
1654static int velocity_init_td_ring(struct velocity_info *vptr)
1655{
1656        int j;
1657
1658        /* Init the TD ring entries */
1659        for (j = 0; j < vptr->tx.numq; j++) {
1660
1661                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1662                                            sizeof(struct velocity_td_info),
1663                                            GFP_KERNEL);
1664                if (!vptr->tx.infos[j]) {
1665                        while (--j >= 0)
1666                                kfree(vptr->tx.infos[j]);
1667                        return -ENOMEM;
1668                }
1669
1670                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1671        }
1672        return 0;
1673}
1674
1675/**
1676 *      velocity_free_dma_rings -       free PCI ring pointers
1677 *      @vptr: Velocity to free from
1678 *
1679 *      Clean up the PCI ring buffers allocated to this velocity.
1680 */
1681static void velocity_free_dma_rings(struct velocity_info *vptr)
1682{
1683        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1684                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1685
1686        pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1687}
1688
1689
1690static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1691{
1692        int ret;
1693
1694        velocity_set_rxbufsize(vptr, mtu);
1695
1696        ret = velocity_init_dma_rings(vptr);
1697        if (ret < 0)
1698                goto out;
1699
1700        ret = velocity_init_rd_ring(vptr);
1701        if (ret < 0)
1702                goto err_free_dma_rings_0;
1703
1704        ret = velocity_init_td_ring(vptr);
1705        if (ret < 0)
1706                goto err_free_rd_ring_1;
1707out:
1708        return ret;
1709
1710err_free_rd_ring_1:
1711        velocity_free_rd_ring(vptr);
1712err_free_dma_rings_0:
1713        velocity_free_dma_rings(vptr);
1714        goto out;
1715}
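/*
 * The error labels above unwind in strict reverse order of setup: a
 * TD ring failure releases the RD ring first and then the DMA pool,
 * so each label undoes exactly the steps that had succeeded.
 */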
1716
1717/**
1718 *      velocity_free_tx_buf    -       free transmit buffer
1719 *      @vptr: velocity
1720 *      @tdinfo: buffer
1721 *      @td: transmit descriptor being released
1722 *      Release a transmit buffer. If the buffer was preallocated then
1723 *      recycle it, if not then unmap the buffer.
1724 */
1725static void velocity_free_tx_buf(struct velocity_info *vptr,
1726                struct velocity_td_info *tdinfo, struct tx_desc *td)
1727{
1728        struct sk_buff *skb = tdinfo->skb;
1729
1730        /*
1731         *      Don't unmap the pre-allocated tx_bufs
1732         */
1733        if (tdinfo->skb_dma) {
1734                int i;
1735
1736                for (i = 0; i < tdinfo->nskb_dma; i++) {
1737                        size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1738
1739                        /* For scatter-gather */
1740                        if (skb_shinfo(skb)->nr_frags > 0)
1741                                pktlen = max_t(size_t, pktlen,
1742                                                le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
1743
1744                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1745                                        pktlen, PCI_DMA_TODEVICE);
1746                }
1747        }
1748        dev_kfree_skb_irq(skb);
1749        tdinfo->skb = NULL;
1750}
1751
1752
1753/*
1754 *      FIXME: could we merge this with velocity_free_tx_buf ?
1755 */
1756static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1757                                                         int q, int n)
1758{
1759        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1760        int i;
1761
1762        if (td_info == NULL)
1763                return;
1764
1765        if (td_info->skb) {
1766                for (i = 0; i < td_info->nskb_dma; i++) {
1767                        if (td_info->skb_dma[i]) {
1768                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1769                                        td_info->skb->len, PCI_DMA_TODEVICE);
1770                                td_info->skb_dma[i] = 0;
1771                        }
1772                }
1773                dev_kfree_skb(td_info->skb);
1774                td_info->skb = NULL;
1775        }
1776}
1777
1778/**
1779 *      velocity_free_td_ring   -       free td ring
1780 *      @vptr: velocity
1781 *
1782 *      Free up the transmit ring for this particular velocity adapter.
1783 *      We free the ring contents but not the ring itself.
1784 */
1785static void velocity_free_td_ring(struct velocity_info *vptr)
1786{
1787        int i, j;
1788
1789        for (j = 0; j < vptr->tx.numq; j++) {
1790                if (vptr->tx.infos[j] == NULL)
1791                        continue;
1792                for (i = 0; i < vptr->options.numtx; i++)
1793                        velocity_free_td_ring_entry(vptr, j, i);
1794
1795                kfree(vptr->tx.infos[j]);
1796                vptr->tx.infos[j] = NULL;
1797        }
1798}
1799
1800
1801static void velocity_free_rings(struct velocity_info *vptr)
1802{
1803        velocity_free_td_ring(vptr);
1804        velocity_free_rd_ring(vptr);
1805        velocity_free_dma_rings(vptr);
1806}
1807
1808/**
1809 *      velocity_error  -       handle error from controller
1810 *      @vptr: velocity
1811 *      @status: card status
1812 *
1813 *      Process an error report from the hardware and attempt to recover
1814 *      the card itself. At the moment we cannot recover from some
1815 *      theoretically impossible errors but this could be fixed using
1816 *      the pci_device_failed logic to bounce the hardware.
1817 *
1818 */
1819static void velocity_error(struct velocity_info *vptr, int status)
1820{
1821
1822        if (status & ISR_TXSTLI) {
1823                struct mac_regs __iomem *regs = vptr->mac_regs;
1824
1825                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1826                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1827                writew(TRDCSR_RUN, &regs->TDCSRClr);
1828                netif_stop_queue(vptr->dev);
1829
1830                /* FIXME: port over the pci_device_failed code and use it
1831                   here */
1832        }
1833
1834        if (status & ISR_SRCI) {
1835                struct mac_regs __iomem *regs = vptr->mac_regs;
1836                int linked;
1837
1838                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1839                        vptr->mii_status = check_connection_type(regs);
1840
1841                        /*
1842                         *      If it is a 3119, disable frame bursting in
1843                         *      half duplex mode and enable it in full duplex
1844                         *      mode.
1845                         */
1846                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1847                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1848                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1849                                else
1850                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1851                        }
1852                        /*
1853                         *      Only enable CD heart beat counter in 10HD mode
1854                         */
1855                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1856                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1857                        else
1858                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1859
1860                        setup_queue_timers(vptr);
1861                }
1862                /*
1863                 *      Get link status from PHYSR0
1864                 */
1865                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1866
1867                if (linked) {
1868                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1869                        netif_carrier_on(vptr->dev);
1870                } else {
1871                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1872                        netif_carrier_off(vptr->dev);
1873                }
1874
1875                velocity_print_link_status(vptr);
1876                enable_flow_control_ability(vptr);
1877
1878                /*
1879                 *      Re-enable auto-polling because SRCI will disable
1880                 *      auto-polling
1881                 */
1882
1883                enable_mii_autopoll(regs);
1884
1885                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1886                        netif_stop_queue(vptr->dev);
1887                else
1888                        netif_wake_queue(vptr->dev);
1889
1890        }
1891        if (status & ISR_MIBFI)
1892                velocity_update_hw_mibs(vptr);
1893        if (status & ISR_LSTEI)
1894                mac_rx_queue_wake(vptr->mac_regs);
1895}
1896
1897/**
1898 *      velocity_tx_srv         -       transmit interrupt service
1899 *      @vptr: velocity
1900 *
1901 *      Scan the queues looking for transmitted packets that
1902 *      we can complete and clean up. Update any statistics as
1903 *      necessary.
1904 */
1905static int velocity_tx_srv(struct velocity_info *vptr)
1906{
1907        struct tx_desc *td;
1908        int qnum;
1909        int full = 0;
1910        int idx;
1911        int works = 0;
1912        struct velocity_td_info *tdinfo;
1913        struct net_device_stats *stats = &vptr->dev->stats;
1914
1915        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1916                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1917                        idx = (idx + 1) % vptr->options.numtx) {
1918
1919                        /*
1920                         *      Get Tx Descriptor
1921                         */
1922                        td = &(vptr->tx.rings[qnum][idx]);
1923                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1924
1925                        if (td->tdesc0.len & OWNED_BY_NIC)
1926                                break;
1927
1928                        if (works++ > 15)
1929                                break;
1930
1931                        if (td->tdesc0.TSR & TSR0_TERR) {
1932                                stats->tx_errors++;
1933                                stats->tx_dropped++;
1934                                if (td->tdesc0.TSR & TSR0_CDH)
1935                                        stats->tx_heartbeat_errors++;
1936                                if (td->tdesc0.TSR & TSR0_CRS)
1937                                        stats->tx_carrier_errors++;
1938                                if (td->tdesc0.TSR & TSR0_ABT)
1939                                        stats->tx_aborted_errors++;
1940                                if (td->tdesc0.TSR & TSR0_OWC)
1941                                        stats->tx_window_errors++;
1942                        } else {
1943                                stats->tx_packets++;
1944                                stats->tx_bytes += tdinfo->skb->len;
1945                        }
1946                        velocity_free_tx_buf(vptr, tdinfo, td);
1947                        vptr->tx.used[qnum]--;
1948                }
1949                vptr->tx.tail[qnum] = idx;
1950
1951                if (AVAIL_TD(vptr, qnum) < 1)
1952                        full = 1;
1953        }
1954        /*
1955         *      Look to see if we should kick the transmit network
1956         *      layer for more work.
1957         */
1958        if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1959            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1960                netif_wake_queue(vptr->dev);
1961        }
1962        return works;
1963}
1964
1965/**
1966 *      velocity_rx_csum        -       checksum process
1967 *      @rd: receive packet descriptor
1968 *      @skb: network layer packet buffer
1969 *
1970 *      Process the status bits for the received packet and determine
1971 *      if the checksum was computed and verified by the hardware
1972 */
1973static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1974{
1975        skb_checksum_none_assert(skb);
1976
1977        if (rd->rdesc1.CSM & CSM_IPKT) {
1978                if (rd->rdesc1.CSM & CSM_IPOK) {
1979                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1980                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1981                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1982                                        return;
1983                        }
1984                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1985                }
1986        }
1987}
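/*
 * Decision table for the logic above (derived from the code, not the
 * datasheet): only a fully verified packet escapes CHECKSUM_NONE.
 *
 *   IPKT IPOK TCP/UDP TUPOK  ->  ip_summed
 *    0    -     -       -        NONE
 *    1    0     -       -        NONE
 *    1    1     0       -        UNNECESSARY (IP header only)
 *    1    1     1       0        NONE
 *    1    1     1       1        UNNECESSARY
 */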
1988
1989/**
1990 *      velocity_rx_copy        -       in place Rx copy for small packets
1991 *      @rx_skb: network layer packet buffer candidate
1992 *      @pkt_size: received data size
1993 *      @vptr: velocity we are handling
1995 *
1996 *      Replace the current skb that is scheduled for Rx processing by a
1997 *      shorter, immediately allocated skb, if the received packet is small
1998 *      enough. This function returns a negative value if the received
1999 *      packet is too big or if memory is exhausted.
2000 */
2001static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2002                            struct velocity_info *vptr)
2003{
2004        int ret = -1;
2005        if (pkt_size < rx_copybreak) {
2006                struct sk_buff *new_skb;
2007
2008                new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
2009                if (new_skb) {
2010                        new_skb->ip_summed = rx_skb[0]->ip_summed;
2011                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2012                        *rx_skb = new_skb;
2013                        ret = 0;
2014                }
2015
2016        }
2017        return ret;
2018}
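/*
 * The copybreak above trades a short memcpy for ring buffer reuse:
 * when a frame is smaller than rx_copybreak it is duplicated into a
 * freshly allocated skb and the original, already DMA mapped buffer
 * stays on the ring, avoiding an allocate-and-map cycle per small
 * packet.
 */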
2019
2020/**
2021 *      velocity_iph_realign    -       IP header alignment
2022 *      @vptr: velocity we are handling
2023 *      @skb: network layer packet buffer
2024 *      @pkt_size: received data size
2025 *
2026 *      Align the IP header on a 2 byte boundary. This behavior can be
2027 *      configured by the user.
2028 */
2029static inline void velocity_iph_realign(struct velocity_info *vptr,
2030                                        struct sk_buff *skb, int pkt_size)
2031{
2032        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2033                memmove(skb->data + 2, skb->data, pkt_size);
2034                skb_reserve(skb, 2);
2035        }
2036}
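/*
 * Rationale for the 2 byte shuffle: the chip wants 64 byte aligned
 * receive buffers, so the 14 byte Ethernet header leaves the IP
 * header 2 byte aligned.  Moving the frame up by two restores the
 * 4 byte alignment the IP stack prefers, at the price of a memmove,
 * which is why the behaviour is a configurable flag.
 */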
2037
2038
2039/**
2040 *      velocity_receive_frame  -       received packet processor
2041 *      @vptr: velocity we are handling
2042 *      @idx: ring index
2043 *
2044 *      A packet has arrived. We process the packet and if appropriate
2045 *      pass the frame up the network stack
2046 */
2047static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2048{
2049        void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2050        struct net_device_stats *stats = &vptr->dev->stats;
2051        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2052        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2053        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2054        struct sk_buff *skb;
2055
2056        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2057                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2058                stats->rx_length_errors++;
2059                return -EINVAL;
2060        }
2061
2062        if (rd->rdesc0.RSR & RSR_MAR)
2063                stats->multicast++;
2064
2065        skb = rd_info->skb;
2066
2067        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2068                                    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2069
2070        /*
2071         *      Drop frame not meeting IEEE 802.3
2072         */
2073
2074        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2075                if (rd->rdesc0.RSR & RSR_RL) {
2076                        stats->rx_length_errors++;
2077                        return -EINVAL;
2078                }
2079        }
2080
2081        pci_action = pci_dma_sync_single_for_device;
2082
2083        velocity_rx_csum(rd, skb);
2084
2085        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2086                velocity_iph_realign(vptr, skb, pkt_len);
2087                pci_action = pci_unmap_single;
2088                rd_info->skb = NULL;
2089        }
2090
2091        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2092                   PCI_DMA_FROMDEVICE);
2093
2094        skb_put(skb, pkt_len - 4);
2095        skb->protocol = eth_type_trans(skb, vptr->dev);
2096
2097        if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
2098                vlan_hwaccel_rx(skb, vptr->vlgrp,
2099                                swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
2100        } else
2101                netif_rx(skb);
2102
2103        stats->rx_bytes += pkt_len;
2104
2105        return 0;
2106}
2107
2108
2109/**
2110 *      velocity_rx_srv         -       service RX interrupt
2111 *      @vptr: velocity
2112 *      @budget_left: maximum number of receive descriptors to process
2113 *      Walk the receive ring of the velocity adapter and remove
2114 *      any received packets from the receive queue. Hand the ring
2115 *      slots back to the adapter for reuse.
2116 */
2117static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2118{
2119        struct net_device_stats *stats = &vptr->dev->stats;
2120        int rd_curr = vptr->rx.curr;
2121        int works = 0;
2122
2123        while (works < budget_left) {
2124                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2125
2126                if (!vptr->rx.info[rd_curr].skb)
2127                        break;
2128
2129                if (rd->rdesc0.len & OWNED_BY_NIC)
2130                        break;
2131
2132                rmb();
2133
2134                /*
2135                 *      Don't drop CE or RL error frame although RXOK is off
2136                 */
2137                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2138                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2139                                stats->rx_dropped++;
2140                } else {
2141                        if (rd->rdesc0.RSR & RSR_CRC)
2142                                stats->rx_crc_errors++;
2143                        if (rd->rdesc0.RSR & RSR_FAE)
2144                                stats->rx_frame_errors++;
2145
2146                        stats->rx_dropped++;
2147                }
2148
2149                rd->size |= RX_INTEN;
2150
2151                rd_curr++;
2152                if (rd_curr >= vptr->options.numrx)
2153                        rd_curr = 0;
2154                works++;
2155        }
2156
2157        vptr->rx.curr = rd_curr;
2158
2159        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2160                velocity_give_many_rx_descs(vptr);
2161
2162        VAR_USED(stats);
2163        return works;
2164}
2165
2166static int velocity_poll(struct napi_struct *napi, int budget)
2167{
2168        struct velocity_info *vptr = container_of(napi,
2169                        struct velocity_info, napi);
2170        unsigned int rx_done;
2171        unsigned long flags;
2172
2173        spin_lock_irqsave(&vptr->lock, flags);
2174        /*
2175         * Do rx and tx twice for performance (taken from the VIA
2176         * out-of-tree driver).
2177         */
2178        rx_done = velocity_rx_srv(vptr, budget / 2);
2179        velocity_tx_srv(vptr);
2180        rx_done += velocity_rx_srv(vptr, budget - rx_done);
2181        velocity_tx_srv(vptr);
2182
2183        /* If budget not fully consumed, exit the polling mode */
2184        if (rx_done < budget) {
2185                napi_complete(napi);
2186                mac_enable_int(vptr->mac_regs);
2187        }
2188        spin_unlock_irqrestore(&vptr->lock, flags);
2189
2190        return rx_done;
2191}
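/*
 * The budget split above keeps a receive burst from starving transmit
 * completion: the first rx pass may use at most half the budget, tx
 * is reaped, then the remaining budget is offered to rx a second
 * time.  Only when the combined rx work stays under the full budget
 * does the poll complete and re-enable the device interrupt.
 */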
2192
2193/**
2194 *      velocity_intr           -       interrupt callback
2195 *      @irq: interrupt number
2196 *      @dev_instance: interrupting device
2197 *
2198 *      Called whenever an interrupt is generated by the velocity
2199 *      adapter IRQ line. We may not be the source of the interrupt
2200 *      and need to identify initially if we are, and if not exit as
2201 *      efficiently as possible.
2202 */
2203static irqreturn_t velocity_intr(int irq, void *dev_instance)
2204{
2205        struct net_device *dev = dev_instance;
2206        struct velocity_info *vptr = netdev_priv(dev);
2207        u32 isr_status;
2208
2209        spin_lock(&vptr->lock);
2210        isr_status = mac_read_isr(vptr->mac_regs);
2211
2212        /* Not us ? */
2213        if (isr_status == 0) {
2214                spin_unlock(&vptr->lock);
2215                return IRQ_NONE;
2216        }
2217
2218        /* Ack the interrupt */
2219        mac_write_isr(vptr->mac_regs, isr_status);
2220
2221        if (likely(napi_schedule_prep(&vptr->napi))) {
2222                mac_disable_int(vptr->mac_regs);
2223                __napi_schedule(&vptr->napi);
2224        }
2225
2226        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2227                velocity_error(vptr, isr_status);
2228
2229        spin_unlock(&vptr->lock);
2230
2231        return IRQ_HANDLED;
2232}
2233
2234/**
2235 *      velocity_open           -       interface activation callback
2236 *      @dev: network layer device to open
2237 *
2238 *      Called when the network layer brings the interface up. Returns
2239 *      a negative posix error code on failure, or zero on success.
2240 *
2241 *      All the ring allocation and set up is done on open for this
2242 *      adapter to minimise memory usage when inactive
2243 */
2244static int velocity_open(struct net_device *dev)
2245{
2246        struct velocity_info *vptr = netdev_priv(dev);
2247        int ret;
2248
2249        ret = velocity_init_rings(vptr, dev->mtu);
2250        if (ret < 0)
2251                goto out;
2252
2253        /* Ensure chip is running */
2254        pci_set_power_state(vptr->pdev, PCI_D0);
2255
2256        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2257
2258        ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2259                          dev->name, dev);
2260        if (ret < 0) {
2261                /* Power down the chip */
2262                pci_set_power_state(vptr->pdev, PCI_D3hot);
2263                velocity_free_rings(vptr);
2264                goto out;
2265        }
2266
2267        velocity_give_many_rx_descs(vptr);
2268
2269        mac_enable_int(vptr->mac_regs);
2270        netif_start_queue(dev);
2271        napi_enable(&vptr->napi);
2272        vptr->flags |= VELOCITY_FLAGS_OPENED;
2273out:
2274        return ret;
2275}
2276
2277/**
2278 *      velocity_shutdown       -       shut down the chip
2279 *      @vptr: velocity to deactivate
2280 *
2281 *      Shuts down the internal operations of the velocity and
2282 *      disables interrupts, autopolling, transmit and receive
2283 */
2284static void velocity_shutdown(struct velocity_info *vptr)
2285{
2286        struct mac_regs __iomem *regs = vptr->mac_regs;
2287        mac_disable_int(regs);
2288        writel(CR0_STOP, &regs->CR0Set);
2289        writew(0xFFFF, &regs->TDCSRClr);
2290        writeb(0xFF, &regs->RDCSRClr);
2291        safe_disable_mii_autopoll(regs);
2292        mac_clear_isr(regs);
2293}
2294
2295/**
2296 *      velocity_change_mtu     -       MTU change callback
2297 *      @dev: network device
2298 *      @new_mtu: desired MTU
2299 *
2300 *      Handle requests from the networking layer for MTU change on
2301 *      this interface. It gets called on a change by the network layer.
2302 *      Return zero for success or negative posix error code.
2303 */
2304static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2305{
2306        struct velocity_info *vptr = netdev_priv(dev);
2307        int ret = 0;
2308
2309        if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2310                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2311                                vptr->dev->name);
2312                ret = -EINVAL;
2313                goto out_0;
2314        }
2315
2316        if (!netif_running(dev)) {
2317                dev->mtu = new_mtu;
2318                goto out_0;
2319        }
2320
2321        if (dev->mtu != new_mtu) {
2322                struct velocity_info *tmp_vptr;
2323                unsigned long flags;
2324                struct rx_info rx;
2325                struct tx_info tx;
2326
2327                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2328                if (!tmp_vptr) {
2329                        ret = -ENOMEM;
2330                        goto out_0;
2331                }
2332
2333                tmp_vptr->dev = dev;
2334                tmp_vptr->pdev = vptr->pdev;
2335                tmp_vptr->options = vptr->options;
2336                tmp_vptr->tx.numq = vptr->tx.numq;
2337
2338                ret = velocity_init_rings(tmp_vptr, new_mtu);
2339                if (ret < 0)
2340                        goto out_free_tmp_vptr_1;
2341
2342                spin_lock_irqsave(&vptr->lock, flags);
2343
2344                netif_stop_queue(dev);
2345                velocity_shutdown(vptr);
2346
2347                rx = vptr->rx;
2348                tx = vptr->tx;
2349
2350                vptr->rx = tmp_vptr->rx;
2351                vptr->tx = tmp_vptr->tx;
2352
2353                tmp_vptr->rx = rx;
2354                tmp_vptr->tx = tx;
2355
2356                dev->mtu = new_mtu;
2357
2358                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2359
2360                velocity_give_many_rx_descs(vptr);
2361
2362                mac_enable_int(vptr->mac_regs);
2363                netif_start_queue(dev);
2364
2365                spin_unlock_irqrestore(&vptr->lock, flags);
2366
2367                velocity_free_rings(tmp_vptr);
2368
2369out_free_tmp_vptr_1:
2370                kfree(tmp_vptr);
2371        }
2372out_0:
2373        return ret;
2374}
2375
2376/**
2377 *      velocity_mii_ioctl              -       MII ioctl handler
2378 *      @dev: network device
2379 *      @ifr: the ifreq block for the ioctl
2380 *      @cmd: the command
2381 *
2382 *      Process MII requests made via ioctl from the network layer. These
2383 *      are used by tools like kudzu to interrogate the link state of the
2384 *      hardware
2385 */
2386static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2387{
2388        struct velocity_info *vptr = netdev_priv(dev);
2389        struct mac_regs __iomem *regs = vptr->mac_regs;
2390        unsigned long flags;
2391        struct mii_ioctl_data *miidata = if_mii(ifr);
2392        int err;
2393
2394        switch (cmd) {
2395        case SIOCGMIIPHY:
2396                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2397                break;
2398        case SIOCGMIIREG:
2399                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2400                        return -ETIMEDOUT;
2401                break;
2402        case SIOCSMIIREG:
2403                spin_lock_irqsave(&vptr->lock, flags);
2404                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2405                spin_unlock_irqrestore(&vptr->lock, flags);
2406                check_connection_type(vptr->mac_regs);
2407                if (err)
2408                        return err;
2409                break;
2410        default:
2411                return -EOPNOTSUPP;
2412        }
2413        return 0;
2414}
2415
2416
2417/**
2418 *      velocity_ioctl          -       ioctl entry point
2419 *      @dev: network device
2420 *      @rq: interface request ioctl
2421 *      @cmd: command code
2422 *
2423 *      Called when the user issues an ioctl request to the network
2424 *      device in question. The velocity interface supports MII.
2425 */
2426static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2427{
2428        struct velocity_info *vptr = netdev_priv(dev);
2429        int ret;
2430
2431        /* If we are asked for information and the device is power
2432           saving then we need to bring the device back up to talk to it */
2433
2434        if (!netif_running(dev))
2435                pci_set_power_state(vptr->pdev, PCI_D0);
2436
2437        switch (cmd) {
2438        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2439        case SIOCGMIIREG:       /* Read MII PHY register. */
2440        case SIOCSMIIREG:       /* Write to MII PHY register. */
2441                ret = velocity_mii_ioctl(dev, rq, cmd);
2442                break;
2443
2444        default:
2445                ret = -EOPNOTSUPP;
2446        }
2447        if (!netif_running(dev))
2448                pci_set_power_state(vptr->pdev, PCI_D3hot);
2449
2450
2451        return ret;
2452}
2453
2454/**
2455 *      velocity_get_stats      -       statistics callback
2456 *      @dev: network device
2457 *
2458 *      Callback from the network layer to allow driver statistics
2459 *      to be resynchronized with hardware collected state. In the
2460 *      case of the velocity we need to pull the MIB counters from
2461 *      the hardware into the counters before letting the network
2462 *      layer display them.
2463 */
2464static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2465{
2466        struct velocity_info *vptr = netdev_priv(dev);
2467
2468        /* If the hardware is down, don't touch MII */
2469        if (!netif_running(dev))
2470                return &dev->stats;
2471
2472        spin_lock_irq(&vptr->lock);
2473        velocity_update_hw_mibs(vptr);
2474        spin_unlock_irq(&vptr->lock);
2475
2476        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2477        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2478        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2479
2480//  unsigned long   rx_dropped;     /* no space in linux buffers    */
2481        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2482        /* detailed rx_errors: */
2483//  unsigned long   rx_length_errors;
2484//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2485        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2486//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2487//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2488//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2489
2490        /* detailed tx_errors */
2491//  unsigned long   tx_fifo_errors;
2492
2493        return &dev->stats;
2494}
2495
2496/**
2497 *      velocity_close          -       close adapter callback
2498 *      @dev: network device
2499 *
2500 *      Callback from the network layer when the velocity is being
2501 *      deactivated by the network layer
2502 */
2503static int velocity_close(struct net_device *dev)
2504{
2505        struct velocity_info *vptr = netdev_priv(dev);
2506
2507        napi_disable(&vptr->napi);
2508        netif_stop_queue(dev);
2509        velocity_shutdown(vptr);
2510
2511        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2512                velocity_get_ip(vptr);
2513        if (dev->irq != 0)
2514                free_irq(dev->irq, dev);
2515
2516        /* Power down the chip */
2517        pci_set_power_state(vptr->pdev, PCI_D3hot);
2518
2519        velocity_free_rings(vptr);
2520
2521        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2522        return 0;
2523}
2524
2525/**
2526 *      velocity_xmit           -       transmit packet callback
2527 *      @skb: buffer to transmit
2528 *      @dev: network device
2529 *
2530 *      Called by the network layer to request that a packet be queued to
2531 *      the velocity. Returns zero on success.
2532 */
2533static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2534                                 struct net_device *dev)
2535{
2536        struct velocity_info *vptr = netdev_priv(dev);
2537        int qnum = 0;
2538        struct tx_desc *td_ptr;
2539        struct velocity_td_info *tdinfo;
2540        unsigned long flags;
2541        int pktlen;
2542        int index, prev;
2543        int i = 0;
2544
2545        if (skb_padto(skb, ETH_ZLEN))
2546                goto out;
2547
2548        /* The hardware can handle at most 7 memory segments, so merge
2549         * the skb if there are more */
2550        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2551                kfree_skb(skb);
2552                return NETDEV_TX_OK;
2553        }
2554
2555        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2556                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2557                                skb_headlen(skb);
2558
2559        spin_lock_irqsave(&vptr->lock, flags);
2560
2561        index = vptr->tx.curr[qnum];
2562        td_ptr = &(vptr->tx.rings[qnum][index]);
2563        tdinfo = &(vptr->tx.infos[qnum][index]);
2564
2565        td_ptr->tdesc1.TCR = TCR0_TIC;
2566        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2567
2568        /*
2569         *      Map the linear network buffer into PCI space and
2570         *      add it to the transmit ring.
2571         */
2572        tdinfo->skb = skb;
2573        tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2574        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2575        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2576        td_ptr->td_buf[0].pa_high = 0;
2577        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2578
2579        /* Handle fragments */
2580        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2581                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2582
2583                tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2584                                frag->page_offset, frag->size,
2585                                PCI_DMA_TODEVICE);
2586
2587                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2588                td_ptr->td_buf[i + 1].pa_high = 0;
2589                td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
2590        }
2591        tdinfo->nskb_dma = i + 1;
2592
2593        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2594
2595        if (vlan_tx_tag_present(skb)) {
2596                td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2597                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2598        }
2599
2600        /*
2601         *      Handle hardware checksum
2602         */
2603        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2604                const struct iphdr *ip = ip_hdr(skb);
2605                if (ip->protocol == IPPROTO_TCP)
2606                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2607                else if (ip->protocol == IPPROTO_UDP)
2608                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2609                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2610        }
2611
2612        prev = index - 1;
2613        if (prev < 0)
2614                prev = vptr->options.numtx - 1;
2615        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2616        vptr->tx.used[qnum]++;
2617        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2618
2619        if (AVAIL_TD(vptr, qnum) < 1)
2620                netif_stop_queue(dev);
2621
2622        td_ptr = &(vptr->tx.rings[qnum][prev]);
2623        td_ptr->td_buf[0].size |= TD_QUEUE;
2624        mac_tx_queue_wake(vptr->mac_regs, qnum);
2625
2626        spin_unlock_irqrestore(&vptr->lock, flags);
2627out:
2628        return NETDEV_TX_OK;
2629}
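/*
 * Ordering note for the transmit path above: the new descriptor is
 * completely assembled and marked OWNED_BY_NIC *before* TD_QUEUE is
 * set on the previous descriptor's first buffer and the queue is
 * kicked.  The hardware therefore never chains into a descriptor
 * that is still being filled in.
 */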
2630
2631
2632static const struct net_device_ops velocity_netdev_ops = {
2633        .ndo_open               = velocity_open,
2634        .ndo_stop               = velocity_close,
2635        .ndo_start_xmit         = velocity_xmit,
2636        .ndo_get_stats          = velocity_get_stats,
2637        .ndo_validate_addr      = eth_validate_addr,
2638        .ndo_set_mac_address    = eth_mac_addr,
2639        .ndo_set_multicast_list = velocity_set_multi,
2640        .ndo_change_mtu         = velocity_change_mtu,
2641        .ndo_do_ioctl           = velocity_ioctl,
2642        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2643        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2644        .ndo_vlan_rx_register   = velocity_vlan_rx_register,
2645};
2646
2647/**
2648 *      velocity_init_info      -       init private data
2649 *      @pdev: PCI device
2650 *      @vptr: Velocity info
2651 *      @info: Board type
2652 *
2653 *      Set up the initial velocity_info struct for the device that has been
2654 *      discovered.
2655 */
2656static void __devinit velocity_init_info(struct pci_dev *pdev,
2657                                         struct velocity_info *vptr,
2658                                         const struct velocity_info_tbl *info)
2659{
2660        memset(vptr, 0, sizeof(struct velocity_info));
2661
2662        vptr->pdev = pdev;
2663        vptr->chip_id = info->chip_id;
2664        vptr->tx.numq = info->txqueue;
2665        vptr->multicast_limit = MCAM_SIZE;
2666        spin_lock_init(&vptr->lock);
2667}
2668
2669/**
2670 *      velocity_get_pci_info   -       retrieve PCI info for device
2671 *      @vptr: velocity device
2672 *      @pdev: PCI device it matches
2673 *
2674 *      Retrieve the PCI configuration space data that interests us from
2675 *      the kernel PCI layer
2676 */
2677static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2678{
2679        vptr->rev_id = pdev->revision;
2680
2681        pci_set_master(pdev);
2682
2683        vptr->ioaddr = pci_resource_start(pdev, 0);
2684        vptr->memaddr = pci_resource_start(pdev, 1);
2685
2686        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2687                dev_err(&pdev->dev,
2688                           "region #0 is not an I/O resource, aborting.\n");
2689                return -EINVAL;
2690        }
2691
2692        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2693                dev_err(&pdev->dev,
2694                           "region #1 is an I/O resource, aborting.\n");
2695                return -EINVAL;
2696        }
2697
2698        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2699                dev_err(&pdev->dev, "region #1 is too small.\n");
2700                return -EINVAL;
2701        }
2702        vptr->pdev = pdev;
2703
2704        return 0;
2705}
2706
2707/**
2708 *      velocity_print_info     -       print per device data
2709 *      @vptr: velocity
2710 *
2711 *      Print per device data as the kernel driver finds Velocity
2712 *      hardware
2713 */
2714static void __devinit velocity_print_info(struct velocity_info *vptr)
2715{
2716        struct net_device *dev = vptr->dev;
2717
2718        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2719        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2720                dev->name, dev->dev_addr);
2721}
2722
2723static u32 velocity_get_link(struct net_device *dev)
2724{
2725        struct velocity_info *vptr = netdev_priv(dev);
2726        struct mac_regs __iomem *regs = vptr->mac_regs;
2727        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2728}
2729
2730
2731/**
2732 *      velocity_found1         -       set up discovered velocity card
2733 *      @pdev: PCI device
2734 *      @ent: PCI device table entry that matched
2735 *
2736 *      Configure a discovered adapter from scratch. Return a negative
2737 *      errno error code on failure paths.
2738 */
2739static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2740{
2741        static int first = 1;
2742        struct net_device *dev;
2743        int i;
2744        const char *drv_string;
2745        const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2746        struct velocity_info *vptr;
2747        struct mac_regs __iomem *regs;
2748        int ret = -ENOMEM;
2749
2750        /* FIXME: this driver, like almost all other ethernet drivers,
2751         * can support more than MAX_UNITS.
2752         */
2753        if (velocity_nics >= MAX_UNITS) {
2754                dev_notice(&pdev->dev, "already found %d NICs.\n",
2755                           velocity_nics);
2756                return -ENODEV;
2757        }
2758
2759        dev = alloc_etherdev(sizeof(struct velocity_info));
2760        if (!dev) {
2761                dev_err(&pdev->dev, "failed to allocate net device.\n");
2762                goto out;
2763        }
2764
2765        /* Chain it all together */
2766
2767        SET_NETDEV_DEV(dev, &pdev->dev);
2768        vptr = netdev_priv(dev);
2769
2770
2771        if (first) {
2772                printk(KERN_INFO "%s Ver. %s\n",
2773                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2774                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2775                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2776                first = 0;
2777        }
2778
2779        velocity_init_info(pdev, vptr, info);
2780
2781        vptr->dev = dev;
2782
2783        ret = pci_enable_device(pdev);
2784        if (ret < 0)
2785                goto err_free_dev;
2786
2787        dev->irq = pdev->irq;
2788
2789        ret = velocity_get_pci_info(vptr, pdev);
2790        if (ret < 0) {
2791                /* error message already printed */
2792                goto err_disable;
2793        }
2794
2795        ret = pci_request_regions(pdev, VELOCITY_NAME);
2796        if (ret < 0) {
2797                dev_err(&pdev->dev, "No PCI resources.\n");
2798                goto err_disable;
2799        }
2800
2801        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2802        if (regs == NULL) {
2803                ret = -EIO;
2804                goto err_release_res;
2805        }
2806
2807        vptr->mac_regs = regs;
2808
2809        mac_wol_reset(regs);
2810
2811        dev->base_addr = vptr->ioaddr;
2812
2813        for (i = 0; i < 6; i++)
2814                dev->dev_addr[i] = readb(&regs->PAR[i]);
2815
2816
2817        drv_string = dev_driver_string(&pdev->dev);
2818
2819        velocity_get_options(&vptr->options, velocity_nics, drv_string);
2820
2821        /*
2822         *      Mask out the options that cannot be set on this chip
2823         */
2824
2825        vptr->options.flags &= info->flags;
2826
2827        /*
2828         *      Enable the chip specific capabilities
2829         */
2830
2831        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2832
2833        vptr->wol_opts = vptr->options.wol_opts;
2834        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2835
2836        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2837
2838        dev->irq = pdev->irq;
2839        dev->netdev_ops = &velocity_netdev_ops;
2840        dev->ethtool_ops = &velocity_ethtool_ops;
2841        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2842
2843        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
2844        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2845                NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2846
2847        ret = register_netdev(dev);
2848        if (ret < 0)
2849                goto err_iounmap;
2850
2851        if (!velocity_get_link(dev)) {
2852                netif_carrier_off(dev);
2853                vptr->mii_status |= VELOCITY_LINK_FAIL;
2854        }
2855
2856        velocity_print_info(vptr);
2857        pci_set_drvdata(pdev, dev);
2858
2859        /* and leave the chip powered down */
2860
2861        pci_set_power_state(pdev, PCI_D3hot);
2862        velocity_nics++;
2863out:
2864        return ret;
2865
2866err_iounmap:
2867        iounmap(regs);
2868err_release_res:
2869        pci_release_regions(pdev);
2870err_disable:
2871        pci_disable_device(pdev);
2872err_free_dev:
2873        free_netdev(dev);
2874        goto out;
2875}
2876
2877
2878#ifdef CONFIG_PM
2879/**
2880 *      wol_calc_crc            -       WOL CRC
2881 *      @size: number of mask bytes (each mask bit selects one pattern byte)
2882 *      @pattern: data pattern
2883 *      @mask_pattern: mask
2884 *      Compute the wake on lan crc hashes for the packet header
2885 *      we are interested in.
2886 */
2887static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2888{
2889        u16 crc = 0xFFFF;
2890        u8 mask;
2891        int i, j;
2892
2893        for (i = 0; i < size; i++) {
2894                mask = mask_pattern[i];
2895
2896                /* Skip the inner loop if this mask byte is zero */
2897                if (mask == 0x00)
2898                        continue;
2899
2900                for (j = 0; j < 8; j++) {
2901                        if ((mask & 0x01) == 0) {
2902                                mask >>= 1;
2903                                continue;
2904                        }
2905                        mask >>= 1;
2906                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2907                }
2908        }
2909        /*      Finally, invert the result and bit-reverse it for the hardware */
2910        crc = ~crc;
2911        return bitrev32(crc) >> 16;
2912}
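/*
 * Equivalent of the loop above in compact form (a sketch for
 * illustration only; the function above is authoritative): bit j of
 * mask byte i selects pattern byte i * 8 + j for inclusion in a
 * CCITT CRC, and the result is inverted and bit-reversed into the
 * order the WOL pattern matcher expects.
 *
 *      u16 crc = 0xFFFF;
 *      for (i = 0; i < size; i++)
 *              for (j = 0; j < 8; j++)
 *                      if (mask_pattern[i] & (1 << j))
 *                              crc = crc_ccitt(crc, &pattern[i * 8 + j], 1);
 *      return bitrev32((u16)~crc) >> 16;
 */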
2913
2914/**
2915 *      velocity_set_wol        -       set up for wake on lan
2916 *      @vptr: velocity to set WOL status on
2917 *
2918 *      Set a card up for wake on lan either by unicast or by
2919 *      ARP packet.
2920 *
2921 *      FIXME: check static buffer is safe here
2922 */
2923static int velocity_set_wol(struct velocity_info *vptr)
2924{
2925        struct mac_regs __iomem *regs = vptr->mac_regs;
2926        enum speed_opt spd_dpx = vptr->options.spd_dpx;
2927        static u8 buf[256];
2928        int i;
2929
2930        static u32 mask_pattern[2][4] = {
2931                {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
2932                {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}  /* Magic Packet */
2933        };
2934
2935        writew(0xFFFF, &regs->WOLCRClr);
2936        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2937        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2938
2939        /*
2940           if (vptr->wol_opts & VELOCITY_WOL_PHY)
2941           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2942         */
2943
2944        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2945                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2946
2947        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2948                struct arp_packet *arp = (struct arp_packet *) buf;
2949                u16 crc;
2950                memset(buf, 0, sizeof(struct arp_packet) + 7);
2951
2952                for (i = 0; i < 4; i++)
2953                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2954
2955                arp->type = htons(ETH_P_ARP);
2956                arp->ar_op = htons(1);
2957
2958                memcpy(arp->ar_tip, vptr->ip_addr, 4);
2959
2960                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2961                                (u8 *) & mask_pattern[0][0]);
2962
2963                writew(crc, &regs->PatternCRC[0]);
2964                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2965        }
2966
2967        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2968        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2969
2970        writew(0x0FFF, &regs->WOLSRClr);
2971
2972        if (spd_dpx == SPD_DPX_1000_FULL)
2973                goto mac_done;
2974
2975        if (spd_dpx != SPD_DPX_AUTO)
2976                goto advertise_done;
2977
2978        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2979                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2980                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2981
2982                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2983        }
2984
2985        if (vptr->mii_status & VELOCITY_SPEED_1000)
2986                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2987
2988advertise_done:
2989        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2990
2991        {
2992                u8 GCR;
2993                GCR = readb(&regs->CHIPGCR);
2994                GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2995                writeb(GCR, &regs->CHIPGCR);
2996        }
2997
2998mac_done:
2999        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3000        /* Turn on SWPTAG just before entering power mode */
3001        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3002        /* Go to bed ..... */
3003        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3004
3005        return 0;
3006}
3007
3008/**
3009 *      velocity_save_context   -       save registers
3010 *      @vptr: velocity
3011 *      @context: buffer for stored context
3012 *
3013 *      Retrieve the current configuration from the velocity hardware
3014 *      and stash it in the context structure, for use by the context
3015 *      restore functions. This allows us to save things we need across
3016 *      power down states
3017 */
3018static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3019{
3020        struct mac_regs __iomem *regs = vptr->mac_regs;
3021        u16 i;
3022        u8 __iomem *ptr = (u8 __iomem *)regs;
3023
3024        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3025                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3026
3027        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3028                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3029
3030        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3031                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3032
3033}
3034
3035static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3036{
3037        struct net_device *dev = pci_get_drvdata(pdev);
3038        struct velocity_info *vptr = netdev_priv(dev);
3039        unsigned long flags;
3040
3041        if (!netif_running(vptr->dev))
3042                return 0;
3043
3044        netif_device_detach(vptr->dev);
3045
3046        spin_lock_irqsave(&vptr->lock, flags);
3047        pci_save_state(pdev);
3048#ifdef ETHTOOL_GWOL
3049        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3050                velocity_get_ip(vptr);
3051                velocity_save_context(vptr, &vptr->context);
3052                velocity_shutdown(vptr);
3053                velocity_set_wol(vptr);
3054                pci_enable_wake(pdev, PCI_D3hot, 1);
3055                pci_set_power_state(pdev, PCI_D3hot);
3056        } else {
3057                velocity_save_context(vptr, &vptr->context);
3058                velocity_shutdown(vptr);
3059                pci_disable_device(pdev);
3060                pci_set_power_state(pdev, pci_choose_state(pdev, state));
3061        }
3062#else
3063        pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif	/* ETHTOOL_GWOL */
3065        spin_unlock_irqrestore(&vptr->lock, flags);
3066        return 0;
3067}
3068
3069/**
3070 *      velocity_restore_context        -       restore registers
3071 *      @vptr: velocity
 *	@context: buffer containing the stored context
3073 *
3074 *      Reload the register configuration from the velocity context
3075 *      created by velocity_save_context.
3076 */
3077static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3078{
3079        struct mac_regs __iomem *regs = vptr->mac_regs;
3080        int i;
3081        u8 __iomem *ptr = (u8 __iomem *)regs;
3082
3083        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3084                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3085
	/*
	 * Skip CR0. For CR1-CR3, first write the complement of the saved
	 * value to the CLR register (4 bytes past the SET register) to
	 * clear stale bits, then write the saved value to the SET register.
	 */
3087        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3088                /* Clear */
3089                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3090                /* Set */
3091                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3092        }
3093
3094        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3095                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3096
3097        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3098                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3099
3100        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3101                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3102}
3103
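/**
 *	velocity_resume		-	resume hook
 *	@pdev: PCI device
 *
 *	Called by the PCI layer as the system leaves a sleep state.
 *	Powers the device back up, restores the saved context and
 *	restarts any transmit queues that have pending work.
 */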
3104static int velocity_resume(struct pci_dev *pdev)
3105{
3106        struct net_device *dev = pci_get_drvdata(pdev);
3107        struct velocity_info *vptr = netdev_priv(dev);
3108        unsigned long flags;
3109        int i;
3110
3111        if (!netif_running(vptr->dev))
3112                return 0;
3113
3114        pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
3116        pci_restore_state(pdev);
3117
3118        mac_wol_reset(vptr->mac_regs);
3119
3120        spin_lock_irqsave(&vptr->lock, flags);
3121        velocity_restore_context(vptr, &vptr->context);
3122        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3123        mac_disable_int(vptr->mac_regs);
3124
3125        velocity_tx_srv(vptr);
3126
3127        for (i = 0; i < vptr->tx.numq; i++) {
3128                if (vptr->tx.used[i])
3129                        mac_tx_queue_wake(vptr->mac_regs, i);
3130        }
3131
3132        mac_enable_int(vptr->mac_regs);
3133        spin_unlock_irqrestore(&vptr->lock, flags);
3134        netif_device_attach(vptr->dev);
3135
3136        return 0;
3137}
#endif	/* CONFIG_PM */
3139
3140/*
3141 *      Definition for our device driver. The PCI layer interface
 *	uses this to handle all our card discovery and plugging
3143 */
static struct pci_driver velocity_driver = {
	.name		= VELOCITY_NAME,
	.id_table	= velocity_id_table,
	.probe		= velocity_found1,
	.remove		= __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
	.suspend	= velocity_suspend,
	.resume		= velocity_resume,
#endif
};
3154
3155
3156/**
3157 *      velocity_ethtool_up     -       pre hook for ethtool
3158 *      @dev: network device
3159 *
3160 *      Called before an ethtool operation. We need to make sure the
3161 *      chip is out of D3 state before we poke at it.
3162 */
3163static int velocity_ethtool_up(struct net_device *dev)
3164{
3165        struct velocity_info *vptr = netdev_priv(dev);
3166        if (!netif_running(dev))
3167                pci_set_power_state(vptr->pdev, PCI_D0);
3168        return 0;
3169}
3170
3171/**
3172 *      velocity_ethtool_down   -       post hook for ethtool
3173 *      @dev: network device
3174 *
3175 *      Called after an ethtool operation. Restore the chip back to D3
3176 *      state if it isn't running.
3177 */
3178static void velocity_ethtool_down(struct net_device *dev)
3179{
3180        struct velocity_info *vptr = netdev_priv(dev);
3181        if (!netif_running(dev))
3182                pci_set_power_state(vptr->pdev, PCI_D3hot);
3183}
3184
3185static int velocity_get_settings(struct net_device *dev,
3186                                 struct ethtool_cmd *cmd)
3187{
3188        struct velocity_info *vptr = netdev_priv(dev);
3189        struct mac_regs __iomem *regs = vptr->mac_regs;
3190        u32 status;
3191        status = check_connection_type(vptr->mac_regs);
3192
3193        cmd->supported = SUPPORTED_TP |
3194                        SUPPORTED_Autoneg |
3195                        SUPPORTED_10baseT_Half |
3196                        SUPPORTED_10baseT_Full |
3197                        SUPPORTED_100baseT_Half |
3198                        SUPPORTED_100baseT_Full |
3199                        SUPPORTED_1000baseT_Half |
3200                        SUPPORTED_1000baseT_Full;
3201
3202        cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3203        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3204                cmd->advertising |=
3205                        ADVERTISED_10baseT_Half |
3206                        ADVERTISED_10baseT_Full |
3207                        ADVERTISED_100baseT_Half |
3208                        ADVERTISED_100baseT_Full |
3209                        ADVERTISED_1000baseT_Half |
3210                        ADVERTISED_1000baseT_Full;
3211        } else {
3212                switch (vptr->options.spd_dpx) {
3213                case SPD_DPX_1000_FULL:
3214                        cmd->advertising |= ADVERTISED_1000baseT_Full;
3215                        break;
3216                case SPD_DPX_100_HALF:
3217                        cmd->advertising |= ADVERTISED_100baseT_Half;
3218                        break;
3219                case SPD_DPX_100_FULL:
3220                        cmd->advertising |= ADVERTISED_100baseT_Full;
3221                        break;
3222                case SPD_DPX_10_HALF:
3223                        cmd->advertising |= ADVERTISED_10baseT_Half;
3224                        break;
3225                case SPD_DPX_10_FULL:
3226                        cmd->advertising |= ADVERTISED_10baseT_Full;
3227                        break;
3228                default:
3229                        break;
3230                }
3231        }
3232
3233        if (status & VELOCITY_SPEED_1000)
3234                ethtool_cmd_speed_set(cmd, SPEED_1000);
3235        else if (status & VELOCITY_SPEED_100)
3236                ethtool_cmd_speed_set(cmd, SPEED_100);
3237        else
3238                ethtool_cmd_speed_set(cmd, SPEED_10);
3239
3240        cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3241        cmd->port = PORT_TP;
3242        cmd->transceiver = XCVR_INTERNAL;
3243        cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3244
3245        if (status & VELOCITY_DUPLEX_FULL)
3246                cmd->duplex = DUPLEX_FULL;
3247        else
3248                cmd->duplex = DUPLEX_HALF;
3249
3250        return 0;
3251}
3252
3253static int velocity_set_settings(struct net_device *dev,
3254                                 struct ethtool_cmd *cmd)
3255{
3256        struct velocity_info *vptr = netdev_priv(dev);
3257        u32 speed = ethtool_cmd_speed(cmd);
3258        u32 curr_status;
3259        u32 new_status = 0;
3260        int ret = 0;
3261
3262        curr_status = check_connection_type(vptr->mac_regs);
	curr_status &= ~VELOCITY_LINK_FAIL;
3264
3265        new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3266        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3267        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3268        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3269        new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3270
3271        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3272            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3273                ret = -EINVAL;
3274        } else {
3275                enum speed_opt spd_dpx;
3276
3277                if (new_status & VELOCITY_AUTONEG_ENABLE)
3278                        spd_dpx = SPD_DPX_AUTO;
3279                else if ((new_status & VELOCITY_SPEED_1000) &&
3280                         (new_status & VELOCITY_DUPLEX_FULL)) {
3281                        spd_dpx = SPD_DPX_1000_FULL;
3282                } else if (new_status & VELOCITY_SPEED_100)
3283                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3284                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3285                else if (new_status & VELOCITY_SPEED_10)
3286                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3287                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3288                else
3289                        return -EOPNOTSUPP;
3290
3291                vptr->options.spd_dpx = spd_dpx;
3292
3293                velocity_set_media_mode(vptr, new_status);
3294        }
3295
3296        return ret;
3297}
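
/*
 * velocity_set_settings() is reached from user space via the ethtool
 * ioctl path. As a usage sketch (the interface name is a placeholder),
 * forcing 100 Mbit full duplex would be:
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 */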
3298
3299static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3300{
3301        struct velocity_info *vptr = netdev_priv(dev);
	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
3305}
3306
3307static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3308{
3309        struct velocity_info *vptr = netdev_priv(dev);
3310        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3311        wol->wolopts |= WAKE_MAGIC;
	/*
	 * if (vptr->wol_opts & VELOCITY_WOL_PHY)
	 *	wol->wolopts |= WAKE_PHY;
	 */
3316        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3317                wol->wolopts |= WAKE_UCAST;
3318        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3319                wol->wolopts |= WAKE_ARP;
3320        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3321}
3322
3323static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3324{
3325        struct velocity_info *vptr = netdev_priv(dev);
3326
	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
		return -EINVAL;
3329        vptr->wol_opts = VELOCITY_WOL_MAGIC;
3330
	/*
	 * if (wol->wolopts & WAKE_PHY) {
	 *	vptr->wol_opts |= VELOCITY_WOL_PHY;
	 *	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	 * }
	 */
3337
3338        if (wol->wolopts & WAKE_MAGIC) {
3339                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3340                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3341        }
3342        if (wol->wolopts & WAKE_UCAST) {
3343                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3344                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3345        }
3346        if (wol->wolopts & WAKE_ARP) {
3347                vptr->wol_opts |= VELOCITY_WOL_ARP;
3348                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3349        }
3350        memcpy(vptr->wol_passwd, wol->sopass, 6);
3351        return 0;
3352}
3353
3354static u32 velocity_get_msglevel(struct net_device *dev)
3355{
3356        return msglevel;
3357}
3358
3359static void velocity_set_msglevel(struct net_device *dev, u32 value)
3360{
	msglevel = value;
3362}
3363
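/*
 *	The RX/TX queue pending timers hold a 6-bit count in bits 5:0
 *	and a multiplier selector in bits 7:6, choosing x1, x4, x16 or
 *	x64, for a range of 0 to 0x3f * 64 = 4032 microseconds.
 */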
3364static int get_pending_timer_val(int val)
3365{
3366        int mult_bits = val >> 6;
3367        int mult = 1;
3368
	switch (mult_bits) {
	case 1:
		mult = 4;
		break;
	case 2:
		mult = 16;
		break;
	case 3:
		mult = 64;
		break;
	case 0:
	default:
		break;
	}
3381
3382        return (val & 0x3f) * mult;
3383}
3384
3385static void set_pending_timer_val(int *val, u32 us)
3386{
3387        u8 mult = 0;
3388        u8 shift = 0;
3389
	if (us >= 0x3f) {
		mult = 1;	/* multiply by 4 */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2;	/* multiply by 16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3;	/* multiply by 64 */
		shift = 6;
	}
3402
3403        *val = (mult << 6) | ((us >> shift) & 0x3f);
3404}
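
/*
 * Worked example of the round trip: a request for 100us exceeds 0x3f
 * but not 0x3f * 4, so set_pending_timer_val() picks mult 1 (x4) and
 * shift 2, storing (1 << 6) | (100 >> 2) = 0x59, which
 * get_pending_timer_val() decodes back as (0x59 & 0x3f) * 4 = 100us.
 */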
3405
3406
3407static int velocity_get_coalesce(struct net_device *dev,
3408                struct ethtool_coalesce *ecmd)
3409{
3410        struct velocity_info *vptr = netdev_priv(dev);
3411
3412        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3413        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3414
3415        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3416        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3417
3418        return 0;
3419}
3420
3421static int velocity_set_coalesce(struct net_device *dev,
3422                struct ethtool_coalesce *ecmd)
3423{
3424        struct velocity_info *vptr = netdev_priv(dev);
3425        int max_us = 0x3f * 64;
3426        unsigned long flags;
3427
	/* The hardware timers hold a 6-bit count, scaled by at most 64 */
3429        if (ecmd->tx_coalesce_usecs > max_us)
3430                return -EINVAL;
3431        if (ecmd->rx_coalesce_usecs > max_us)
3432                return -EINVAL;
3433
3434        if (ecmd->tx_max_coalesced_frames > 0xff)
3435                return -EINVAL;
3436        if (ecmd->rx_max_coalesced_frames > 0xff)
3437                return -EINVAL;
3438
3439        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3440        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3441
3442        set_pending_timer_val(&vptr->options.rxqueue_timer,
3443                        ecmd->rx_coalesce_usecs);
3444        set_pending_timer_val(&vptr->options.txqueue_timer,
3445                        ecmd->tx_coalesce_usecs);
3446
3447        /* Setup the interrupt suppression and queue timers */
3448        spin_lock_irqsave(&vptr->lock, flags);
3449        mac_disable_int(vptr->mac_regs);
3450        setup_adaptive_interrupts(vptr);
3451        setup_queue_timers(vptr);
3452
3453        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3454        mac_clear_isr(vptr->mac_regs);
3455        mac_enable_int(vptr->mac_regs);
3456        spin_unlock_irqrestore(&vptr->lock, flags);
3457
3458        return 0;
3459}
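
/*
 * The coalescing hooks above back the standard ethtool interface, e.g.
 * (interface name is a placeholder):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 16 tx-usecs 100 tx-frames 16
 */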
3460
3461static const struct ethtool_ops velocity_ethtool_ops = {
3462        .get_settings   =       velocity_get_settings,
3463        .set_settings   =       velocity_set_settings,
3464        .get_drvinfo    =       velocity_get_drvinfo,
3465        .get_wol        =       velocity_ethtool_get_wol,
3466        .set_wol        =       velocity_ethtool_set_wol,
3467        .get_msglevel   =       velocity_get_msglevel,
3468        .set_msglevel   =       velocity_set_msglevel,
3469        .get_link       =       velocity_get_link,
3470        .get_coalesce   =       velocity_get_coalesce,
3471        .set_coalesce   =       velocity_set_coalesce,
3472        .begin          =       velocity_ethtool_up,
3473        .complete       =       velocity_ethtool_down
3474};
3475
3476#ifdef CONFIG_PM
3477#ifdef CONFIG_INET
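/**
 *	velocity_netdev_event	-	inetaddr notifier hook
 *	@nb: notifier block
 *	@notification: event code
 *	@ptr: the in_ifaddr that changed
 *
 *	Track IPv4 address changes on velocity interfaces so that the
 *	cached address used to build the ARP wake-on-LAN pattern stays
 *	current across a suspend.
 */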
3478static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3479{
3480        struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3481        struct net_device *dev = ifa->ifa_dev->dev;
3482
3483        if (dev_net(dev) == &init_net &&
3484            dev->netdev_ops == &velocity_netdev_ops)
3485                velocity_get_ip(netdev_priv(dev));
3486
3487        return NOTIFY_DONE;
3488}
3489#endif  /* CONFIG_INET */
3490#endif  /* CONFIG_PM */
3491
3492#if defined(CONFIG_PM) && defined(CONFIG_INET)
3493static struct notifier_block velocity_inetaddr_notifier = {
3494      .notifier_call    = velocity_netdev_event,
3495};
3496
3497static void velocity_register_notifier(void)
3498{
3499        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3500}
3501
3502static void velocity_unregister_notifier(void)
3503{
3504        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3505}
3506
3507#else
3508
3509#define velocity_register_notifier()    do {} while (0)
3510#define velocity_unregister_notifier()  do {} while (0)
3511
3512#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3513
3514/**
3515 *      velocity_init_module    -       load time function
3516 *
 *	Called when the velocity module is loaded. The PCI driver
 *	is registered with the PCI layer, which will in turn call
 *	the probe function for each velocity adapter installed
 *	in the system.
3521 */
3522static int __init velocity_init_module(void)
3523{
3524        int ret;
3525
3526        velocity_register_notifier();
3527        ret = pci_register_driver(&velocity_driver);
3528        if (ret < 0)
3529                velocity_unregister_notifier();
3530        return ret;
3531}
3532
3533/**
3534 *      velocity_cleanup        -       module unload
3535 *
 *	When the velocity module is unloaded this function is called.
 *	It will clean up the notifiers and then unregister the PCI
 *	driver interface for this hardware. This in turn cleans up
 *	all discovered interfaces before returning from the function.
3540 */
3541static void __exit velocity_cleanup_module(void)
3542{
3543        velocity_unregister_notifier();
3544        pci_unregister_driver(&velocity_driver);
3545}
3546
3547module_init(velocity_init_module);
3548module_exit(velocity_cleanup_module);
3549