linux/drivers/net/via-velocity.c
/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64-bit hardware platforms.
 *
 * TODO
 *      rx_copybreak/alignment
 *      More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list, not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

/**
 *      mac_get_cam_mask        -       Read a CAM mask
 *      @regs: register block for this velocity
 *      @mask: buffer to store mask
 *
 *      Fetch the mask bits of the selected CAM and store them into the
 *      provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *      mac_set_cam_mask        -       Set a CAM mask
 *      @regs: register block for this velocity
 *      @mask: CAM mask to load
 *
 *      Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam     -       set CAM data
 *      @regs: register block of this velocity
 *      @idx: CAM index
 *      @addr: 2 or 6 bytes of CAM data
 *
 *      Load an address or VLAN tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
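
/*
 * Illustrative sketch (not part of the driver): loading a unicast
 * address into the first address CAM and enabling it in the mask
 * would look like the snippet below, which mirrors what
 * velocity_set_multi() does for multicast entries. "dev" stands for
 * the adapter's net_device and is assumed here.
 *
 *      u8 mask[8];
 *
 *      mac_get_cam_mask(regs, mask);
 *      mac_set_cam(regs, 0, dev->dev_addr);
 *      mask[0] |= 0x01;                        (enable CAM entry 0)
 *      mac_set_cam_mask(regs, mask);
 */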

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *      mac_wol_reset   -       reset WOL after exiting low power
 *      @regs: register block of this velocity
 *
 *      Called after we drop out of Wake-on-LAN mode in order to reset
 *      the Wake-on-LAN features. This function doesn't restore the rest
 *      of the logic from the result of sleep/wakeup
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);
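
/*
 * For example, the first invocation below,
 * VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"),
 * expands to:
 *
 *      static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      module_param_array(RxDescriptors, int, NULL, 0);
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * so each option can be given per adapter on the module command line,
 * e.g. "modprobe via-velocity RxDescriptors=64,128" on a hypothetical
 * two-NIC system.
 */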

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive FIFO threshold.
   0: the RX FIFO threshold is 128 bytes.
   1: the RX FIFO threshold is 512 bytes.
   2: the RX FIFO threshold is 1024 bytes.
   3: the RX FIFO operates in store & forward mode.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (store & forward, flush till empty)
   7: SF (store & forward, flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for controlling whether the IP header is
   DWORD byte aligned.
   0: the IP header won't be DWORD byte aligned. (Default)
   1: the IP header will be DWORD byte aligned.
      In some environments the IP header must be DWORD byte aligned,
      or the packet will be dropped when we receive it (e.g. IPVS).
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of the NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
   0: autonegotiation for both speed and duplex mode
   1: 100Mbps half duplex mode
   2: 100Mbps full duplex mode
   3: 10Mbps half duplex mode
   4: 10Mbps full duplex mode
   5: 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF     0
/* ValPktLen[] is used for controlling how frames with an invalid
   layer-2 length are handled.
   0: Receive frames with invalid layer-2 length (Default)
   1: Drop frames with invalid layer-2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling Wake-on-LAN behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if the link status changes.
   2: Wake up if an ARP packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
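
/*
 * For instance, loading the module with wol_opts=6 (2 + 4) would arm
 * wake-up on both ARP and unicast packets for the first adapter.
 */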

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");

/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */
static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 *      get_chip_name   -       identifier to name
 *      @chip_id: chip identifier
 *
 *      Given a chip identifier return a suitable description. Returns
 *      a pointer to a static string valid while the driver is loaded.
 */
static const char __devinit *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 *      velocity_remove1        -       device unplug
 *      @pdev: PCI device being removed
 *
 *      Device unload callback. Called on an unplug or on module
 *      unload for each active device that is present. Disconnects
 *      the device from the network layer and frees all the resources
 */
static void __devexit velocity_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct velocity_info *vptr = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(vptr->mac_regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        velocity_nics--;
}

/**
 *      velocity_set_int_opt    -       parser for integer options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @min: lowest value allowed
 *      @max: highest value allowed
 *      @def: default value
 *      @name: property name
 *      @devname: device name
 *
 *      Set an integer property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                                        devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                                        devname, name, val);
                *opt = val;
        }
}
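
/*
 * For example, "modprobe via-velocity rx_thresh=9" trips the range
 * check above (valid range 0-3), logs the KERN_NOTICE message and
 * falls back to RX_THRESH_DEF, while an unset option (passed in
 * as -1) selects the default silently.
 */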

/**
 *      velocity_set_bool_opt   -       parser for boolean options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @def: default value (yes/no)
 *      @flag: numeric value to set for true.
 *      @name: property name
 *      @devname: device name
 *
 *      Set a boolean property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 *      velocity_get_options    -       set options on device
 *      @opts: option structure for the device
 *      @index: index of option to use in module options array
 *      @devname: device name
 *
 *      Turn the module and command options into a single structure
 *      for the current device
 */
static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
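        /* Round the RX descriptor count down to a multiple of 4; the
         * hardware takes RX descriptors back in blocks of four (see
         * velocity_give_many_rx_descs() below). */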
        opts->numrx = (opts->numrx & ~3);
}

/**
 *      velocity_init_cam_filter        -       initialise CAM
 *      @vptr: velocity to program
 *
 *      Initialize the content addressable memory used for filters. Load
 *      appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        if (vptr->vlgrp) {
                unsigned int vid, i = 0;

                if (!vlan_group_get_device(vptr->vlgrp, 0))
                        WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

                for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
                        if (vlan_group_get_device(vptr->vlgrp, vid)) {
                                mac_set_vlan_cam(regs, i, (u8 *) &vid);
                                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                                if (++i >= VCAM_SIZE)
                                        break;
                        }
                }
                mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        }
}

static void velocity_vlan_rx_register(struct net_device *dev,
                                      struct vlan_group *grp)
{
        struct velocity_info *vptr = netdev_priv(dev);

        vptr->vlgrp = grp;
}

static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        vlan_group_set_device(vptr->vlgrp, vid, NULL);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *      velocity_rx_reset       -       handle a receive reset
 *      @vptr: velocity we are resetting
 *
 *      Reset the ownership and status for the receive ring side.
 *      Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         *      Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *      velocity_get_opt_media_mode     -       get media selection
 *      @vptr: velocity adapter
 *
 *      Get the media mode stored in EEPROM or module options and load
 *      mii_status accordingly. The requested link state information
 *      is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        case SPD_DPX_1000_FULL:
                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 *      safe_disable_mii_autopoll       -       autopoll off
 *      @regs: velocity registers
 *
 *      Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /* turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      enable_mii_autopoll     -       turn on autopolling
 *      @regs: velocity registers
 *
 *      Enable the MII link status autopoll feature on the Velocity
 *      hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

}

/**
 *      velocity_mii_read       -       read MII data
 *      @regs: velocity registers
 *      @index: MII register index
 *      @data: buffer for received data
 *
 *      Perform a single read of an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that the MII address can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}
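
/*
 * Illustrative use, mirroring the BMSR handling in
 * mii_check_media_mode() below (the surrounding function is assumed):
 *
 *      u16 bmsr;
 *
 *      if (velocity_mii_read(regs, MII_BMSR, &bmsr) == 0 &&
 *          (bmsr & BMSR_LSTATUS)) {
 *              (link is reported up)
 *      }
 */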


/**
 *      mii_check_media_mode    -       check media state
 *      @regs: velocity registers
 *
 *      Check the current MII status and determine the link status
 *      accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_mii_write      -       write MII data
 *      @regs: velocity registers
 *      @mii_addr: MII register address
 *      @data: 16bit data for the MII register
 *
 *      Perform a single write to an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that the MII address can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 *      set_mii_flow_control    -       flow control setup
 *      @vptr: velocity interface
 *
 *      Set up the flow control on this interface according to
 *      the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
        }
}

/**
 *      mii_set_auto_on         -       autonegotiate on
 *      @vptr: velocity
 *
 *      Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}
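
/*
 * Design note: if autonegotiation is already enabled only a restart
 * (BMCR_ANRESTART) is requested, so the PHY renegotiates with the
 * ANAR/CTRL1000 bits programmed by the callers; otherwise enabling
 * BMCR_ANENABLE is presumably enough to kick off the first
 * negotiation on its own.
 */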

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;
        PHYSR0 = readb(&regs->PHYSR0);

        /*
           if (!(PHYSR0 & PHYSR0_LINKGD))
           status|=VELOCITY_LINK_FAIL;
         */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}



/**
 *      velocity_set_media_mode         -       set media mode
 *      @vptr: velocity adapter
 *      @mii_status: old MII link state
 *
 *      Check the media link state and configure the flow control,
 *      the PHY and the velocity hardware setup accordingly. In
 *      particular we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        u32 curr_status;
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        /*
           Check if new status is consistent with current status
           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
               (mii_status==curr_status)) {
           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
           vptr->mii_status=check_connection_type(vptr->mac_regs);
           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
           return 0;
           }
         */

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

        /*
         *      If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in halfduplex mode
                 *    and enable it in fullduplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);

                if (mii_status & VELOCITY_SPEED_1000)
                        CHIPGCR |= CHIPGCR_FCGMII;
                else
                        CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
                if ((mii_status & VELOCITY_SPEED_1000) &&
                    (mii_status & VELOCITY_DUPLEX_FULL)) {
                        CTRL1000 |= ADVERTISE_1000FULL;
                }
                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
                } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
                                ANAR |= ADVERTISE_10HALF;
                }
                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 *      velocity_print_link_status      -       link status reporting
 *      @vptr: velocity to report on
 *
 *      Turn the link status of the velocity card into a kernel log
 *      description of the new link state, detailing speed and duplex
 *      status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
                        break;
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                default:
                        break;
                }
        }
}

/**
 *      enable_flow_control_ability     -       flow control
 *      @vptr: velocity to configure
 *
 *      Set up flow control according to the flow control options
 *      determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {

        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }

}

/**
 *      velocity_soft_reset     -       soft reset
 *      @vptr: velocity to reset
 *
 *      Kick off a soft reset of the velocity adapter and then poll
 *      until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 *      velocity_set_multi      -       filter list change callback
 *      @dev: network device
 *
 *      Called by the network layer when the filter lists need to change
 *      for a velocity adapter. Reload the CAMs with the new address
 *      filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask);

                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/*
 * MII access, media link mode setting functions
 */

/**
 *      mii_init        -       set up MII
 *      @vptr: velocity adapter
 *      @mii_status: link status
 *
 *      Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on the ECHODIS bit in NWay-forced full mode and
                 *      turn it off in NWay-forced half mode for the
                 *      NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on the ECHODIS bit in NWay-forced full mode and
                 *      turn it off in NWay-forced half mode for the
                 *      NWay-forced vs. legacy-forced issue
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *      Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
        if (BMCR & BMCR_ISOLATE) {
                BMCR &= ~BMCR_ISOLATE;
                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers   -       Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}
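
/*
 * Note: the CAMCR_PS0/PS1 writes above reuse the CAM page-select bits
 * to reach the TX and RX suppression thresholds behind ISRCTL. A
 * non-zero threshold also masks the corresponding per-packet interrupt
 * sources in int_mask, the idea presumably being that the chip only
 * interrupts once the programmed frame count is reached or the hold
 * timers programmed in setup_queue_timers() expire.
 */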

/**
 *      velocity_init_registers -       initialise MAC registers
 *      @vptr: velocity to init
 *      @type: type of initialisation (hot or cold)
 *
 *      Initialise the MAC on a reset or on first set up on the
 *      hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(vptr->dev);

                /*
                 *      Reset RX to keep the RX pointer on a 4-descriptor (4X) boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                                                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *      Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                mac_eeprom_reload(regs);
                for (i = 0; i < 6; i++)
                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));

                /*
                 *      clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *      Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *      Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(vptr->dev);

                /*
                 *      Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(vptr->dev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * The RD number handed back must be a multiple of 4 (4X) per
         * the hardware spec (programming guide rev 1.20, p.13)
         */
        if (vptr->rx.filled < 4)
                return;

        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}
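
/*
 * Worked example: with rx.filled == 7, "unusable" is 7 & 3 == 3, so
 * only four descriptors are flipped to OWNED_BY_NIC and RBRDU is
 * credited with 4; the remaining three stay counted in rx.filled for
 * a later call.
 */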

/**
 *      velocity_init_dma_rings -       set up DMA rings
 *      @vptr: Velocity to set up
 *
 *      Allocate PCI mapped DMA rings for the receive and transmit layer
 *      to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
        struct velocity_opt *opt = &vptr->options;
        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
        struct pci_dev *pdev = vptr->pdev;
        dma_addr_t pool_dma;
        void *pool;
        unsigned int i;

        /*
         * Allocate all RD/TD rings in a single pool.
         *
         * pci_alloc_consistent() fulfills the requirement for 64 bytes
         * alignment
         */
        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
                                    rx_ring_size, &pool_dma);
        if (!pool) {
                dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
                        vptr->dev->name);
                return -ENOMEM;
        }

        vptr->rx.ring = pool;
        vptr->rx.pool_dma = pool_dma;

        pool += rx_ring_size;
        pool_dma += rx_ring_size;

        for (i = 0; i < vptr->tx.numq; i++) {
                vptr->tx.rings[i] = pool;
                vptr->tx.pool_dma[i] = pool_dma;
                pool += tx_ring_size;
                pool_dma += tx_ring_size;
        }

        return 0;
}
1507
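/*
 *      Standard frames use the fixed PKT_BUF_SZ receive buffer; larger
 *      MTUs get the MTU plus 32 bytes of slack.
 */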
1508static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1509{
1510        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1511}
1512
1513/**
1514 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1515 *      @vptr: velocity
1516 *      @idx: ring index
1517 *
1518 *      Allocate a new full sized buffer for the reception of a frame and
1519 *      map it into PCI space for the hardware to use. The hardware
1520 *      requires *64* byte alignment of the buffer which makes life
1521 *      less fun than would be ideal.
1522 */
1523static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1524{
1525        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1526        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1527
1528        rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1529        if (rd_info->skb == NULL)
1530                return -ENOMEM;
1531
1532        /*
1533         *      Do the gymnastics to get the buffer head for data at
1534         *      64-byte alignment.
1535         */
1536        skb_reserve(rd_info->skb,
1537                        64 - ((unsigned long) rd_info->skb->data & 63));
1538        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1539                                        vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1540
1541        /*
1542         *      Fill in the descriptor to match
1543         */
1544
1545        *((u32 *)&rd->rdesc0) = 0;
1546        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1547        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1548        rd->pa_high = 0;
1549        return 0;
1550}
1551
1552
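/**
 *      velocity_rx_refill      -       attach fresh rx buffers
 *      @vptr: velocity
 *
 *      Walk the dirty portion of the receive ring, attaching a new
 *      buffer to every slot that lacks one. Stop early if the NIC
 *      still owns a descriptor or an allocation fails. Returns the
 *      number of descriptors refilled.
 */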
1553static int velocity_rx_refill(struct velocity_info *vptr)
1554{
1555        int dirty = vptr->rx.dirty, done = 0;
1556
1557        do {
1558                struct rx_desc *rd = vptr->rx.ring + dirty;
1559
1560                /* Fine for an all zero Rx desc at init time as well */
1561                if (rd->rdesc0.len & OWNED_BY_NIC)
1562                        break;
1563
1564                if (!vptr->rx.info[dirty].skb) {
1565                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1566                                break;
1567                }
1568                done++;
1569                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1570        } while (dirty != vptr->rx.curr);
1571
1572        if (done) {
1573                vptr->rx.dirty = dirty;
1574                vptr->rx.filled += done;
1575        }
1576
1577        return done;
1578}
1579
1580/**
1581 *      velocity_free_rd_ring   -       free receive ring
1582 *      @vptr: velocity to clean up
1583 *
1584 *      Free the receive buffers for each ring slot and any
1585 *      attached socket buffers that need to go away.
1586 */
1587static void velocity_free_rd_ring(struct velocity_info *vptr)
1588{
1589        int i;
1590
1591        if (vptr->rx.info == NULL)
1592                return;
1593
1594        for (i = 0; i < vptr->options.numrx; i++) {
1595                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1596                struct rx_desc *rd = vptr->rx.ring + i;
1597
1598                memset(rd, 0, sizeof(*rd));
1599
1600                if (!rd_info->skb)
1601                        continue;
1602                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1603                                 PCI_DMA_FROMDEVICE);
1604                rd_info->skb_dma = 0;
1605
1606                dev_kfree_skb(rd_info->skb);
1607                rd_info->skb = NULL;
1608        }
1609
1610        kfree(vptr->rx.info);
1611        vptr->rx.info = NULL;
1612}
1613
1614
1615
1616/**
1617 *      velocity_init_rd_ring   -       set up receive ring
1618 *      @vptr: velocity to configure
1619 *
1620 *      Allocate and set up the receive buffers for each ring slot and
1621 *      assign them to the network adapter.
1622 */
1623static int velocity_init_rd_ring(struct velocity_info *vptr)
1624{
1625        int ret = -ENOMEM;
1626
1627        vptr->rx.info = kcalloc(vptr->options.numrx,
1628                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1629        if (!vptr->rx.info)
1630                goto out;
1631
1632        velocity_init_rx_ring_indexes(vptr);
1633
1634        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1635                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1636                        "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1637                velocity_free_rd_ring(vptr);
1638                goto out;
1639        }
1640
1641        ret = 0;
1642out:
1643        return ret;
1644}
1645
1646/**
1647 *      velocity_init_td_ring   -       set up transmit ring
1648 *      @vptr:  velocity
1649 *
1650 *      Set up the transmit ring and chain the ring pointers together.
1651 *      Returns zero on success or a negative posix errno code for
1652 *      failure.
1653 */
1654static int velocity_init_td_ring(struct velocity_info *vptr)
1655{
1656        int j;
1657
1658        /* Init the TD ring entries */
1659        for (j = 0; j < vptr->tx.numq; j++) {
1660
1661                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1662                                            sizeof(struct velocity_td_info),
1663                                            GFP_KERNEL);
1664                if (!vptr->tx.infos[j]) {
1665                        while (--j >= 0)
1666                                kfree(vptr->tx.infos[j]);
1667                        return -ENOMEM;
1668                }
1669
1670                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1671        }
1672        return 0;
1673}
1674
1675/**
1676 *      velocity_free_dma_rings -       free PCI ring pointers
1677 *      @vptr: Velocity to free from
1678 *
1679 *      Clean up the PCI ring buffers allocated to this velocity.
1680 */
1681static void velocity_free_dma_rings(struct velocity_info *vptr)
1682{
1683        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1684                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1685
1686        pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1687}
1688
1689
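/**
 *      velocity_init_rings     -       set up all rings
 *      @vptr: velocity
 *      @mtu: MTU the rings must accommodate
 *
 *      Size the receive buffers for @mtu, then allocate the DMA pool
 *      and set up the receive and transmit rings, unwinding on any
 *      failure. Returns zero on success or a negative errno code.
 */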
1690static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1691{
1692        int ret;
1693
1694        velocity_set_rxbufsize(vptr, mtu);
1695
1696        ret = velocity_init_dma_rings(vptr);
1697        if (ret < 0)
1698                goto out;
1699
1700        ret = velocity_init_rd_ring(vptr);
1701        if (ret < 0)
1702                goto err_free_dma_rings_0;
1703
1704        ret = velocity_init_td_ring(vptr);
1705        if (ret < 0)
1706                goto err_free_rd_ring_1;
1707out:
1708        return ret;
1709
1710err_free_rd_ring_1:
1711        velocity_free_rd_ring(vptr);
1712err_free_dma_rings_0:
1713        velocity_free_dma_rings(vptr);
1714        goto out;
1715}
1716
1717/**
1718 *      velocity_free_tx_buf    -       free transmit buffer
1719 *      @vptr: velocity
1720 *      @tdinfo: buffer
1721 *
1722 *      Release a transmit buffer. If the buffer was preallocated then
1723 *      recycle it, if not then unmap the buffer.
1724 */
1725static void velocity_free_tx_buf(struct velocity_info *vptr,
1726                struct velocity_td_info *tdinfo, struct tx_desc *td)
1727{
1728        struct sk_buff *skb = tdinfo->skb;
1729
1730        /*
1731         *      Don't unmap the pre-allocated tx_bufs
1732         */
1733        if (tdinfo->skb_dma) {
1734                int i;
1735
1736                for (i = 0; i < tdinfo->nskb_dma; i++) {
1737                        size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1738
1739                        /* For scatter-gather */
1740                        if (skb_shinfo(skb)->nr_frags > 0)
1741                                pktlen = max_t(size_t, pktlen,
1742                                pktlen = max_t(size_t, pktlen,
1743                                                le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
1744
1745                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1746                                        pktlen, PCI_DMA_TODEVICE);
1747        }
1748        dev_kfree_skb_irq(skb);
1749        tdinfo->skb = NULL;
1750}
1751
1752
1753/*
1754 *      FIXME: could we merge this with velocity_free_tx_buf ?
1755 */
1756static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1757                                                         int q, int n)
1758{
1759        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1760        int i;
1761
1762        if (td_info == NULL)
1763                return;
1764
1765        if (td_info->skb) {
1766                for (i = 0; i < td_info->nskb_dma; i++) {
1767                        if (td_info->skb_dma[i]) {
1768                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1769                                        td_info->skb->len, PCI_DMA_TODEVICE);
1770                                td_info->skb_dma[i] = 0;
1771                        }
1772                }
1773                dev_kfree_skb(td_info->skb);
1774                td_info->skb = NULL;
1775        }
1776}
1777
1778/**
1779 *      velocity_free_td_ring   -       free td ring
1780 *      @vptr: velocity
1781 *
1782 *      Free up the transmit ring for this particular velocity adapter.
1783 *      We free the ring contents but not the ring itself.
1784 */
1785static void velocity_free_td_ring(struct velocity_info *vptr)
1786{
1787        int i, j;
1788
1789        for (j = 0; j < vptr->tx.numq; j++) {
1790                if (vptr->tx.infos[j] == NULL)
1791                        continue;
1792                for (i = 0; i < vptr->options.numtx; i++)
1793                        velocity_free_td_ring_entry(vptr, j, i);
1794
1795                kfree(vptr->tx.infos[j]);
1796                vptr->tx.infos[j] = NULL;
1797        }
1798}
1799
1800
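/*
 *      Tear down the transmit and receive rings and release the shared
 *      DMA pool behind them.
 */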
1801static void velocity_free_rings(struct velocity_info *vptr)
1802{
1803        velocity_free_td_ring(vptr);
1804        velocity_free_rd_ring(vptr);
1805        velocity_free_dma_rings(vptr);
1806}
1807
1808/**
1809 *      velocity_error  -       handle error from controller
1810 *      @vptr: velocity
1811 *      @status: card status
1812 *
1813 *      Process an error report from the hardware and attempt to recover
1814 *      the card itself. At the moment we cannot recover from some
1815 *      theoretically impossible errors but this could be fixed using
1816 *      the pci_device_failed logic to bounce the hardware.
1817 *
1818 */
1819static void velocity_error(struct velocity_info *vptr, int status)
1820{
1821
1822        if (status & ISR_TXSTLI) {
1823                struct mac_regs __iomem *regs = vptr->mac_regs;
1824
1825                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1826                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1827                writew(TRDCSR_RUN, &regs->TDCSRClr);
1828                netif_stop_queue(vptr->dev);
1829
1830                /* FIXME: port over the pci_device_failed code and use it
1831                   here */
1832        }
1833
1834        if (status & ISR_SRCI) {
1835                struct mac_regs __iomem *regs = vptr->mac_regs;
1836                int linked;
1837
1838                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1839                        vptr->mii_status = check_connection_type(regs);
1840
1841                        /*
1842                         *      If it is a 3119, disable frame bursting in
1843                         *      half-duplex mode and enable it in full-duplex
1844                         *      mode.
1845                         */
1846                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1847                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1848                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1849                                else
1850                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1851                        }
1852                        /*
1853                         *      Only enable CD heart beat counter in 10HD mode
1854                         */
1855                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1856                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1857                        else
1858                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1859
1860                        setup_queue_timers(vptr);
1861                }
1862                /*
1863                 *      Get link status from PHYSR0
1864                 */
1865                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1866
1867                if (linked) {
1868                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1869                        netif_carrier_on(vptr->dev);
1870                } else {
1871                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1872                        netif_carrier_off(vptr->dev);
1873                }
1874
1875                velocity_print_link_status(vptr);
1876                enable_flow_control_ability(vptr);
1877
1878                /*
1879                 *      Re-enable auto-polling because SRCI will disable
1880                 *      auto-polling
1881                 */
1882
1883                enable_mii_autopoll(regs);
1884
1885                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1886                        netif_stop_queue(vptr->dev);
1887                else
1888                        netif_wake_queue(vptr->dev);
1889
1890        }
1891        if (status & ISR_MIBFI)
1892                velocity_update_hw_mibs(vptr);
1893        if (status & ISR_LSTEI)
1894                mac_rx_queue_wake(vptr->mac_regs);
1895}
1896
1897/**
1898 *      velocity_tx_srv         -       transmit interrupt service
1899 *      @vptr: velocity
1900 *
1901 *      Scan the queues looking for transmitted packets that
1902 *      we can complete and clean up. Update any statistics as
1903 *      necessary.
1904 */
1905static int velocity_tx_srv(struct velocity_info *vptr)
1906{
1907        struct tx_desc *td;
1908        int qnum;
1909        int full = 0;
1910        int idx;
1911        int works = 0;
1912        struct velocity_td_info *tdinfo;
1913        struct net_device_stats *stats = &vptr->dev->stats;
1914
1915        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1916                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1917                        idx = (idx + 1) % vptr->options.numtx) {
1918
1919                        /*
1920                         *      Get Tx Descriptor
1921                         */
1922                        td = &(vptr->tx.rings[qnum][idx]);
1923                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1924
1925                        if (td->tdesc0.len & OWNED_BY_NIC)
1926                                break;
1927
1928                        if (works++ > 15)
1929                                break;
1930
1931                        if (td->tdesc0.TSR & TSR0_TERR) {
1932                                stats->tx_errors++;
1933                                stats->tx_dropped++;
1934                                if (td->tdesc0.TSR & TSR0_CDH)
1935                                        stats->tx_heartbeat_errors++;
1936                                if (td->tdesc0.TSR & TSR0_CRS)
1937                                        stats->tx_carrier_errors++;
1938                                if (td->tdesc0.TSR & TSR0_ABT)
1939                                        stats->tx_aborted_errors++;
1940                                if (td->tdesc0.TSR & TSR0_OWC)
1941                                        stats->tx_window_errors++;
1942                        } else {
1943                                stats->tx_packets++;
1944                                stats->tx_bytes += tdinfo->skb->len;
1945                        }
1946                        velocity_free_tx_buf(vptr, tdinfo, td);
1947                        vptr->tx.used[qnum]--;
1948                }
1949                vptr->tx.tail[qnum] = idx;
1950
1951                if (AVAIL_TD(vptr, qnum) < 1)
1952                        full = 1;
1953        }
1954        /*
1955         *      Look to see if we should kick the transmit network
1956         *      layer for more work.
1957         */
1958        if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1959            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1960                netif_wake_queue(vptr->dev);
1961        }
1962        return works;
1963}
1964
1965/**
1966 *      velocity_rx_csum        -       checksum process
1967 *      @rd: receive packet descriptor
1968 *      @skb: network layer packet buffer
1969 *
1970 *      Process the status bits for the received packet and determine
1971 *      if the checksum was computed and verified by the hardware
1972 */
1973static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1974{
1975        skb_checksum_none_assert(skb);
1976
1977        if (rd->rdesc1.CSM & CSM_IPKT) {
1978                if (rd->rdesc1.CSM & CSM_IPOK) {
1979                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1980                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1981                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1982                                        return;
1983                        }
1984                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1985                }
1986        }
1987}
1988
1989/**
1990 *      velocity_rx_copy        -       in place Rx copy for small packets
1991 *      @rx_skb: network layer packet buffer candidate
1992 *      @pkt_size: received data size
1993 *      @vptr: velocity we are handling
1994 *
1995 *      Replace the current skb that is scheduled for Rx processing by a
1996 *      shorter, immediately allocated skb, if the received packet is small
1998 *      enough. This function returns a negative value if the received
1999 *      packet is too big or if memory is exhausted.
2000 */
2001static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2002                            struct velocity_info *vptr)
2003{
2004        int ret = -1;
2005        if (pkt_size < rx_copybreak) {
2006                struct sk_buff *new_skb;
2007
2008                new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
2009                if (new_skb) {
2010                        new_skb->ip_summed = rx_skb[0]->ip_summed;
2011                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2012                        *rx_skb = new_skb;
2013                        ret = 0;
2014                }
2015
2016        }
2017        return ret;
2018}
2019
2020/**
2021 *      velocity_iph_realign    -       IP header alignment
2022 *      @vptr: velocity we are handling
2023 *      @skb: network layer packet buffer
2024 *      @pkt_size: received data size
2025 *
2026 *      Align IP header on a 2-byte boundary. This behavior can be
2027 *      configured by the user.
2028 */
2029static inline void velocity_iph_realign(struct velocity_info *vptr,
2030                                        struct sk_buff *skb, int pkt_size)
2031{
2032        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2033                memmove(skb->data + 2, skb->data, pkt_size);
2034                skb_reserve(skb, 2);
2035        }
2036}
2037
2038
2039/**
2040 *      velocity_receive_frame  -       received packet processor
2041 *      @vptr: velocity we are handling
2042 *      @idx: ring index
2043 *
2044 *      A packet has arrived. We process the packet and if appropriate
2045 *      pass the frame up the network stack
2046 */
2047static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2048{
2049        void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2050        struct net_device_stats *stats = &vptr->dev->stats;
2051        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2052        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2053        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2054        struct sk_buff *skb;
2055
2056        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2057                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2058                stats->rx_length_errors++;
2059                return -EINVAL;
2060        }
2061
2062        if (rd->rdesc0.RSR & RSR_MAR)
2063                stats->multicast++;
2064
2065        skb = rd_info->skb;
2066
2067        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2068                                    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2069
2070        /*
2071         *      Drop frame not meeting IEEE 802.3
2072         */
2073
2074        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2075                if (rd->rdesc0.RSR & RSR_RL) {
2076                        stats->rx_length_errors++;
2077                        return -EINVAL;
2078                }
2079        }
2080
2081        pci_action = pci_dma_sync_single_for_device;
2082
2083        velocity_rx_csum(rd, skb);
2084
2085        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2086                velocity_iph_realign(vptr, skb, pkt_len);
2087                pci_action = pci_unmap_single;
2088                rd_info->skb = NULL;
2089        }
2090
2091        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2092                   PCI_DMA_FROMDEVICE);
2093
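        /* The reported length includes the trailing 4 byte CRC: trim it */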
2094        skb_put(skb, pkt_len - 4);
2095        skb->protocol = eth_type_trans(skb, vptr->dev);
2096
2097        if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
2098                vlan_hwaccel_rx(skb, vptr->vlgrp,
2099                                swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
2100        } else
2101                netif_rx(skb);
2102
2103        stats->rx_bytes += pkt_len;
2104
2105        return 0;
2106}
2107
2108
2109/**
2110 *      velocity_rx_srv         -       service RX interrupt
2111 *      @vptr: velocity
2112 *
2113 *      Walk the receive ring of the velocity adapter and remove
2114 *      any received packets from the receive queue. Hand the ring
2115 *      slots back to the adapter for reuse.
2116 */
2117static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2118{
2119        struct net_device_stats *stats = &vptr->dev->stats;
2120        int rd_curr = vptr->rx.curr;
2121        int works = 0;
2122
2123        while (works < budget_left) {
2124                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2125
2126                if (!vptr->rx.info[rd_curr].skb)
2127                        break;
2128
2129                if (rd->rdesc0.len & OWNED_BY_NIC)
2130                        break;
2131
2132                rmb();
2133
2134                /*
2135                 *      Don't drop CE or RL error frames even though RXOK is off
2136                 */
2137                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2138                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2139                                stats->rx_dropped++;
2140                } else {
2141                        if (rd->rdesc0.RSR & RSR_CRC)
2142                                stats->rx_crc_errors++;
2143                        if (rd->rdesc0.RSR & RSR_FAE)
2144                                stats->rx_frame_errors++;
2145
2146                        stats->rx_dropped++;
2147                }
2148
2149                rd->size |= RX_INTEN;
2150
2151                rd_curr++;
2152                if (rd_curr >= vptr->options.numrx)
2153                        rd_curr = 0;
2154                works++;
2155        }
2156
2157        vptr->rx.curr = rd_curr;
2158
2159        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2160                velocity_give_many_rx_descs(vptr);
2161
2162        VAR_USED(stats);
2163        return works;
2164}
2165
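/**
 *      velocity_poll   -       NAPI poll callback
 *      @napi: NAPI context
 *      @budget: maximum number of rx packets to handle
 *
 *      Service the receive and transmit rings under the adapter lock.
 *      If less than the full budget is consumed, leave polling mode
 *      and re-enable interrupts.
 */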
2166static int velocity_poll(struct napi_struct *napi, int budget)
2167{
2168        struct velocity_info *vptr = container_of(napi,
2169                        struct velocity_info, napi);
2170        unsigned int rx_done;
2171        unsigned long flags;
2172
2173        spin_lock_irqsave(&vptr->lock, flags);
2174        /*
2175         * Do rx and tx twice for performance (taken from the VIA
2176         * out-of-tree driver).
2177         */
2178        rx_done = velocity_rx_srv(vptr, budget / 2);
2179        velocity_tx_srv(vptr);
2180        rx_done += velocity_rx_srv(vptr, budget - rx_done);
2181        velocity_tx_srv(vptr);
2182
2183        /* If budget not fully consumed, exit the polling mode */
2184        if (rx_done < budget) {
2185                napi_complete(napi);
2186                mac_enable_int(vptr->mac_regs);
2187        }
2188        spin_unlock_irqrestore(&vptr->lock, flags);
2189
2190        return rx_done;
2191}
2192
2193/**
2194 *      velocity_intr           -       interrupt callback
2195 *      @irq: interrupt number
2196 *      @dev_instance: interrupting device
2197 *
2198 *      Called whenever an interrupt is generated by the velocity
2199 *      adapter IRQ line. We may not be the source of the interrupt,
2200 *      so we first check whether we are and, if not, exit as
2201 *      efficiently as possible.
2202 */
2203static irqreturn_t velocity_intr(int irq, void *dev_instance)
2204{
2205        struct net_device *dev = dev_instance;
2206        struct velocity_info *vptr = netdev_priv(dev);
2207        u32 isr_status;
2208
2209        spin_lock(&vptr->lock);
2210        isr_status = mac_read_isr(vptr->mac_regs);
2211
2212        /* Not us ? */
2213        if (isr_status == 0) {
2214                spin_unlock(&vptr->lock);
2215                return IRQ_NONE;
2216        }
2217
2218        /* Ack the interrupt */
2219        mac_write_isr(vptr->mac_regs, isr_status);
2220
2221        if (likely(napi_schedule_prep(&vptr->napi))) {
2222                mac_disable_int(vptr->mac_regs);
2223                __napi_schedule(&vptr->napi);
2224        }
2225
2226        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2227                velocity_error(vptr, isr_status);
2228
2229        spin_unlock(&vptr->lock);
2230
2231        return IRQ_HANDLED;
2232}
2233
2234/**
2235 *      velocity_open           -       interface activation callback
2236 *      @dev: network layer device to open
2237 *
2238 *      Called when the network layer brings the interface up. Returns
2239 *      a negative posix error code on failure, or zero on success.
2240 *
2241 *      All the ring allocation and set up is done on open for this
2242 *      adapter to minimise memory usage when inactive
2243 */
2244static int velocity_open(struct net_device *dev)
2245{
2246        struct velocity_info *vptr = netdev_priv(dev);
2247        int ret;
2248
2249        ret = velocity_init_rings(vptr, dev->mtu);
2250        if (ret < 0)
2251                goto out;
2252
2253        /* Ensure chip is running */
2254        pci_set_power_state(vptr->pdev, PCI_D0);
2255
2256        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2257
2258        ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2259                          dev->name, dev);
2260        if (ret < 0) {
2261                /* Power down the chip */
2262                pci_set_power_state(vptr->pdev, PCI_D3hot);
2263                velocity_free_rings(vptr);
2264                goto out;
2265        }
2266
2267        velocity_give_many_rx_descs(vptr);
2268
2269        mac_enable_int(vptr->mac_regs);
2270        netif_start_queue(dev);
2271        napi_enable(&vptr->napi);
2272        vptr->flags |= VELOCITY_FLAGS_OPENED;
2273out:
2274        return ret;
2275}
2276
2277/**
2278 *      velocity_shutdown       -       shut down the chip
2279 *      @vptr: velocity to deactivate
2280 *
2281 *      Shuts down the internal operations of the velocity and
2282 *      disables interrupts, autopolling, transmit and receive.
2283 */
2284static void velocity_shutdown(struct velocity_info *vptr)
2285{
2286        struct mac_regs __iomem *regs = vptr->mac_regs;
2287        mac_disable_int(regs);
2288        writel(CR0_STOP, &regs->CR0Set);
2289        writew(0xFFFF, &regs->TDCSRClr);
2290        writeb(0xFF, &regs->RDCSRClr);
2291        safe_disable_mii_autopoll(regs);
2292        mac_clear_isr(regs);
2293}
2294
2295/**
2296 *      velocity_change_mtu     -       MTU change callback
2297 *      @dev: network device
2298 *      @new_mtu: desired MTU
2299 *
2300 *      Handle requests from the networking layer for MTU change on
2301 *      this interface. It gets called on a change by the network layer.
2302 *      Return zero for success or negative posix error code.
2303 */
2304static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2305{
2306        struct velocity_info *vptr = netdev_priv(dev);
2307        int ret = 0;
2308
2309        if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2310                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2311                                vptr->dev->name);
2312                ret = -EINVAL;
2313                goto out_0;
2314        }
2315
2316        if (!netif_running(dev)) {
2317                dev->mtu = new_mtu;
2318                goto out_0;
2319        }
2320
2321        if (dev->mtu != new_mtu) {
2322                struct velocity_info *tmp_vptr;
2323                unsigned long flags;
2324                struct rx_info rx;
2325                struct tx_info tx;
2326
2327                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2328                if (!tmp_vptr) {
2329                        ret = -ENOMEM;
2330                        goto out_0;
2331                }
2332
2333                tmp_vptr->dev = dev;
2334                tmp_vptr->pdev = vptr->pdev;
2335                tmp_vptr->options = vptr->options;
2336                tmp_vptr->tx.numq = vptr->tx.numq;
2337
2338                ret = velocity_init_rings(tmp_vptr, new_mtu);
2339                if (ret < 0)
2340                        goto out_free_tmp_vptr_1;
2341
2342                spin_lock_irqsave(&vptr->lock, flags);
2343
2344                netif_stop_queue(dev);
2345                velocity_shutdown(vptr);
2346
2347                rx = vptr->rx;
2348                tx = vptr->tx;
2349
2350                vptr->rx = tmp_vptr->rx;
2351                vptr->tx = tmp_vptr->tx;
2352
2353                tmp_vptr->rx = rx;
2354                tmp_vptr->tx = tx;
2355
2356                dev->mtu = new_mtu;
2357
2358                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2359
2360                velocity_give_many_rx_descs(vptr);
2361
2362                mac_enable_int(vptr->mac_regs);
2363                netif_start_queue(dev);
2364
2365                spin_unlock_irqrestore(&vptr->lock, flags);
2366
2367                velocity_free_rings(tmp_vptr);
2368
2369out_free_tmp_vptr_1:
2370                kfree(tmp_vptr);
2371        }
2372out_0:
2373        return ret;
2374}
2375
2376/**
2377 *      velocity_mii_ioctl              -       MII ioctl handler
2378 *      @dev: network device
2379 *      @ifr: the ifreq block for the ioctl
2380 *      @cmd: the command
2381 *
2382 *      Process MII requests made via ioctl from the network layer. These
2383 *      are used by tools like kudzu to interrogate the link state of the
2384 *      hardware
2385 */
2386static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2387{
2388        struct velocity_info *vptr = netdev_priv(dev);
2389        struct mac_regs __iomem *regs = vptr->mac_regs;
2390        unsigned long flags;
2391        struct mii_ioctl_data *miidata = if_mii(ifr);
2392        int err;
2393
2394        switch (cmd) {
2395        case SIOCGMIIPHY:
2396                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2397                break;
2398        case SIOCGMIIREG:
2399                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2400                        return -ETIMEDOUT;
2401                break;
2402        case SIOCSMIIREG:
2403                spin_lock_irqsave(&vptr->lock, flags);
2404                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2405                spin_unlock_irqrestore(&vptr->lock, flags);
2406                check_connection_type(vptr->mac_regs);
2407                if (err)
2408                        return err;
2409                break;
2410        default:
2411                return -EOPNOTSUPP;
2412        }
2413        return 0;
2414}
2415
2416
2417/**
2418 *      velocity_ioctl          -       ioctl entry point
2419 *      @dev: network device
2420 *      @rq: interface request ioctl
2421 *      @cmd: command code
2422 *
2423 *      Called when the user issues an ioctl request to the network
2424 *      device in question. The velocity interface supports MII.
2425 */
2426static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2427{
2428        struct velocity_info *vptr = netdev_priv(dev);
2429        int ret;
2430
2431        /* If we are asked for information and the device is power
2432           saving then we need to bring the device back up to talk to it */
2433
2434        if (!netif_running(dev))
2435                pci_set_power_state(vptr->pdev, PCI_D0);
2436
2437        switch (cmd) {
2438        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2439        case SIOCGMIIREG:       /* Read MII PHY register. */
2440        case SIOCSMIIREG:       /* Write to MII PHY register. */
2441                ret = velocity_mii_ioctl(dev, rq, cmd);
2442                break;
2443
2444        default:
2445                ret = -EOPNOTSUPP;
2446        }
2447        if (!netif_running(dev))
2448                pci_set_power_state(vptr->pdev, PCI_D3hot);
2449
2450
2451        return ret;
2452}
2453
2454/**
2455 *      velocity_get_stats      -       statistics callback
2456 *      @dev: network device
2457 *
2458 *      Callback from the network layer to allow driver statistics
2459 *      to be resynchronized with hardware collected state. In the
2460 *      case of the velocity we need to pull the MIB counters from
2461 *      the hardware into the software counters before letting the network
2462 *      layer display them.
2463 */
2464static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2465{
2466        struct velocity_info *vptr = netdev_priv(dev);
2467
2468        /* If the hardware is down, don't touch MII */
2469        if (!netif_running(dev))
2470                return &dev->stats;
2471
2472        spin_lock_irq(&vptr->lock);
2473        velocity_update_hw_mibs(vptr);
2474        spin_unlock_irq(&vptr->lock);
2475
2476        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2477        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2478        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2479
2480//  unsigned long   rx_dropped;     /* no space in linux buffers    */
2481        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2482        /* detailed rx_errors: */
2483//  unsigned long   rx_length_errors;
2484//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2485        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2486//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2487//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2488//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2489
2490        /* detailed tx_errors */
2491//  unsigned long   tx_fifo_errors;
2492
2493        return &dev->stats;
2494}
2495
2496/**
2497 *      velocity_close          -       close adapter callback
2498 *      @dev: network device
2499 *
2500 *      Callback from the network layer when the velocity is being
2501 *      deactivated by the network layer
2502 */
2503static int velocity_close(struct net_device *dev)
2504{
2505        struct velocity_info *vptr = netdev_priv(dev);
2506
2507        napi_disable(&vptr->napi);
2508        netif_stop_queue(dev);
2509        velocity_shutdown(vptr);
2510
2511        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2512                velocity_get_ip(vptr);
2513        if (dev->irq != 0)
2514                free_irq(dev->irq, dev);
2515
2516        /* Power down the chip */
2517        pci_set_power_state(vptr->pdev, PCI_D3hot);
2518
2519        velocity_free_rings(vptr);
2520
2521        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2522        return 0;
2523}
2524
2525/**
2526 *      velocity_xmit           -       transmit packet callback
2527 *      @skb: buffer to transmit
2528 *      @dev: network device
2529 *
2530 *      Called by the network layer to request that a packet be queued to
2531 *      the velocity. Returns zero on success.
2532 */
2533static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2534                                 struct net_device *dev)
2535{
2536        struct velocity_info *vptr = netdev_priv(dev);
2537        int qnum = 0;
2538        struct tx_desc *td_ptr;
2539        struct velocity_td_info *tdinfo;
2540        unsigned long flags;
2541        int pktlen;
2542        int index, prev;
2543        int i = 0;
2544
2545        if (skb_padto(skb, ETH_ZLEN))
2546                goto out;
2547
2548        /* The hardware can handle at most 7 memory segments, so merge
2549         * the skb if there are more */
2550        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2551                kfree_skb(skb);
2552                return NETDEV_TX_OK;
2553        }
2554
2555        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2556                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2557                                skb_headlen(skb);
2558
2559        spin_lock_irqsave(&vptr->lock, flags);
2560
2561        index = vptr->tx.curr[qnum];
2562        td_ptr = &(vptr->tx.rings[qnum][index]);
2563        tdinfo = &(vptr->tx.infos[qnum][index]);
2564
2565        td_ptr->tdesc1.TCR = TCR0_TIC;
2566        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2567
2568        /*
2569         *      Map the linear network buffer into PCI space and
2570         *      add it to the transmit ring.
2571         */
2572        tdinfo->skb = skb;
2573        tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2574        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2575        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2576        td_ptr->td_buf[0].pa_high = 0;
2577        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2578
2579        /* Handle fragments */
2580        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2581                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2582
2583                tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2584                                frag->page_offset, frag->size,
2585                                PCI_DMA_TODEVICE);
2586
2587                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2588                td_ptr->td_buf[i + 1].pa_high = 0;
2589                td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
2590        }
2591        tdinfo->nskb_dma = i + 1;
2592
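        /*
         *      The high nibble of cmd encodes the number of buffers used
         *      plus one (hence the multiply by 16); the low bits carry
         *      TCPLS_NORMAL.
         */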
2593        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2594
2595        if (vlan_tx_tag_present(skb)) {
2596                td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2597                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2598        }
2599
2600        /*
2601         *      Handle hardware checksum
2602         */
2603        if ((dev->features & NETIF_F_IP_CSUM) &&
2604            (skb->ip_summed == CHECKSUM_PARTIAL)) {
2605                const struct iphdr *ip = ip_hdr(skb);
2606                if (ip->protocol == IPPROTO_TCP)
2607                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2608                else if (ip->protocol == IPPROTO_UDP)
2609                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2610                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2611        }
2612
2613        prev = index - 1;
2614        if (prev < 0)
2615                prev = vptr->options.numtx - 1;
2616        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2617        vptr->tx.used[qnum]++;
2618        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2619
2620        if (AVAIL_TD(vptr, qnum) < 1)
2621                netif_stop_queue(dev);
2622
2623        td_ptr = &(vptr->tx.rings[qnum][prev]);
2624        td_ptr->td_buf[0].size |= TD_QUEUE;
2625        mac_tx_queue_wake(vptr->mac_regs, qnum);
2626
2627        spin_unlock_irqrestore(&vptr->lock, flags);
2628out:
2629        return NETDEV_TX_OK;
2630}
2631
2632
2633static const struct net_device_ops velocity_netdev_ops = {
2634        .ndo_open               = velocity_open,
2635        .ndo_stop               = velocity_close,
2636        .ndo_start_xmit         = velocity_xmit,
2637        .ndo_get_stats          = velocity_get_stats,
2638        .ndo_validate_addr      = eth_validate_addr,
2639        .ndo_set_mac_address    = eth_mac_addr,
2640        .ndo_set_multicast_list = velocity_set_multi,
2641        .ndo_change_mtu         = velocity_change_mtu,
2642        .ndo_do_ioctl           = velocity_ioctl,
2643        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2644        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2645        .ndo_vlan_rx_register   = velocity_vlan_rx_register,
2646};
2647
2648/**
2649 *      velocity_init_info      -       init private data
2650 *      @pdev: PCI device
2651 *      @vptr: Velocity info
2652 *      @info: Board type
2653 *
2654 *      Set up the initial velocity_info struct for the device that has been
2655 *      discovered.
2656 */
2657static void __devinit velocity_init_info(struct pci_dev *pdev,
2658                                         struct velocity_info *vptr,
2659                                         const struct velocity_info_tbl *info)
2660{
2661        memset(vptr, 0, sizeof(struct velocity_info));
2662
2663        vptr->pdev = pdev;
2664        vptr->chip_id = info->chip_id;
2665        vptr->tx.numq = info->txqueue;
2666        vptr->multicast_limit = MCAM_SIZE;
2667        spin_lock_init(&vptr->lock);
2668}
2669
2670/**
2671 *      velocity_get_pci_info   -       retrieve PCI info for device
2672 *      @vptr: velocity device
2673 *      @pdev: PCI device it matches
2674 *
2675 *      Retrieve the PCI configuration space data that interests us from
2676 *      the kernel PCI layer
2677 */
2678static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2679{
2680        vptr->rev_id = pdev->revision;
2681
2682        pci_set_master(pdev);
2683
2684        vptr->ioaddr = pci_resource_start(pdev, 0);
2685        vptr->memaddr = pci_resource_start(pdev, 1);
2686
2687        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2688                dev_err(&pdev->dev,
2689                           "region #0 is not an I/O resource, aborting.\n");
2690                return -EINVAL;
2691        }
2692
2693        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2694                dev_err(&pdev->dev,
2695                           "region #1 is an I/O resource, aborting.\n");
2696                return -EINVAL;
2697        }
2698
2699        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2700                dev_err(&pdev->dev, "region #1 is too small.\n");
2701                return -EINVAL;
2702        }
2703        vptr->pdev = pdev;
2704
2705        return 0;
2706}
2707
2708/**
2709 *      velocity_print_info     -       per driver data
2710 *      @vptr: velocity
2711 *
2712 *      Print per driver data as the kernel driver finds Velocity
2713 *      hardware
2714 */
2715static void __devinit velocity_print_info(struct velocity_info *vptr)
2716{
2717        struct net_device *dev = vptr->dev;
2718
2719        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2720        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2721                dev->name, dev->dev_addr);
2722}
2723
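/**
 *      velocity_get_link       -       link status callback
 *      @dev: network device
 *
 *      Report the link state from PHYSR0: 1 if the link is up,
 *      0 otherwise.
 */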
2724static u32 velocity_get_link(struct net_device *dev)
2725{
2726        struct velocity_info *vptr = netdev_priv(dev);
2727        struct mac_regs __iomem *regs = vptr->mac_regs;
2728        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2729}
2730
2731
2732/**
2733 *      velocity_found1         -       set up discovered velocity card
2734 *      @pdev: PCI device
2735 *      @ent: PCI device table entry that matched
2736 *
2737 *      Configure a discovered adapter from scratch. Return a negative
2738 *      errno error code on failure paths.
2739 */
2740static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2741{
2742        static int first = 1;
2743        struct net_device *dev;
2744        int i;
2745        const char *drv_string;
2746        const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2747        struct velocity_info *vptr;
2748        struct mac_regs __iomem *regs;
2749        int ret = -ENOMEM;
2750
2751        /* FIXME: this driver, like almost all other ethernet drivers,
2752         * can support more than MAX_UNITS.
2753         */
2754        if (velocity_nics >= MAX_UNITS) {
2755                dev_notice(&pdev->dev, "already found %d NICs.\n",
2756                           velocity_nics);
2757                return -ENODEV;
2758        }
2759
2760        dev = alloc_etherdev(sizeof(struct velocity_info));
2761        if (!dev) {
2762                dev_err(&pdev->dev, "allocate net device failed.\n");
2763                goto out;
2764        }
2765
2766        /* Chain it all together */
2767
2768        SET_NETDEV_DEV(dev, &pdev->dev);
2769        vptr = netdev_priv(dev);
2770
2771
2772        if (first) {
2773                printk(KERN_INFO "%s Ver. %s\n",
2774                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2775                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2776                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2777                first = 0;
2778        }
2779
2780        velocity_init_info(pdev, vptr, info);
2781
2782        vptr->dev = dev;
2783
2784        ret = pci_enable_device(pdev);
2785        if (ret < 0)
2786                goto err_free_dev;
2787
2788        dev->irq = pdev->irq;
2789
2790        ret = velocity_get_pci_info(vptr, pdev);
2791        if (ret < 0) {
2792                /* error message already printed */
2793                goto err_disable;
2794        }
2795
2796        ret = pci_request_regions(pdev, VELOCITY_NAME);
2797        if (ret < 0) {
2798                dev_err(&pdev->dev, "No PCI resources.\n");
2799                goto err_disable;
2800        }
2801
2802        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2803        if (regs == NULL) {
2804                ret = -EIO;
2805                goto err_release_res;
2806        }
2807
2808        vptr->mac_regs = regs;
2809
2810        mac_wol_reset(regs);
2811
2812        dev->base_addr = vptr->ioaddr;
2813
2814        for (i = 0; i < 6; i++)
2815                dev->dev_addr[i] = readb(&regs->PAR[i]);
2816
2817
2818        drv_string = dev_driver_string(&pdev->dev);
2819
2820        velocity_get_options(&vptr->options, velocity_nics, drv_string);
2821
2822        /*
2823         *      Mask out the options that cannot be set on this chip
2824         */
2825
2826        vptr->options.flags &= info->flags;
2827
2828        /*
2829         *      Enable the chip-specific capabilities
2830         */
2831
2832        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2833
2834        vptr->wol_opts = vptr->options.wol_opts;
2835        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2836
2837        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2838
2839        dev->irq = pdev->irq;
2840        dev->netdev_ops = &velocity_netdev_ops;
2841        dev->ethtool_ops = &velocity_ethtool_ops;
2842        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2843
2844        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2845                NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2846
2847        ret = register_netdev(dev);
2848        if (ret < 0)
2849                goto err_iounmap;
2850
2851        if (!velocity_get_link(dev)) {
2852                netif_carrier_off(dev);
2853                vptr->mii_status |= VELOCITY_LINK_FAIL;
2854        }
2855
2856        velocity_print_info(vptr);
2857        pci_set_drvdata(pdev, dev);
2858
2859        /* and leave the chip powered down */
2860
2861        pci_set_power_state(pdev, PCI_D3hot);
2862        velocity_nics++;
2863out:
2864        return ret;
2865
2866err_iounmap:
2867        iounmap(regs);
2868err_release_res:
2869        pci_release_regions(pdev);
2870err_disable:
2871        pci_disable_device(pdev);
2872err_free_dev:
2873        free_netdev(dev);
2874        goto out;
2875}
2876
2877
2878#ifdef CONFIG_PM
2879/**
2880 *      wol_calc_crc            -       WOL CRC
2881 *      @size: number of 8-byte blocks covered by the mask
2882 *      @pattern: data pattern
2883 *      @mask_pattern: mask
2883 *
2884 *      Compute the wake on lan crc hashes for the packet header
2885 *      we are interested in.
2886 */
2887static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2888{
2889        u16 crc = 0xFFFF;
2890        u8 mask;
2891        int i, j;
2892
2893        for (i = 0; i < size; i++) {
2894                mask = mask_pattern[i];
2895
2896                /* Skip this mask byte if it is zero */
2897                if (mask == 0x00)
2898                        continue;
2899
2900                for (j = 0; j < 8; j++) {
2901                        if ((mask & 0x01) == 0) {
2902                                mask >>= 1;
2903                                continue;
2904                        }
2905                        mask >>= 1;
2906                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2907                }
2908        }
2909        /*      Finally, invert the result once to get the correct data */
2910        crc = ~crc;
2911        return bitrev32(crc) >> 16;
2912}
2913
2914/**
2915 *      velocity_set_wol        -       set up for wake on lan
2916 *      @vptr: velocity to set WOL status on
2917 *
2918 *      Set a card up for wake on lan either by unicast or by
2919 *      ARP packet.
2920 *
2921 *      FIXME: check static buffer is safe here
2922 */
2923static int velocity_set_wol(struct velocity_info *vptr)
2924{
2925        struct mac_regs __iomem *regs = vptr->mac_regs;
2926        static u8 buf[256];
2927        int i;
2928
2929        static u32 mask_pattern[2][4] = {
2930                {0x00203000, 0x000003C0, 0x00000000, 0x00000000}, /* ARP */
2931                {0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}  /* Magic Packet */
2932        };
2933
2934        writew(0xFFFF, &regs->WOLCRClr);
2935        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2936        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2937
2938        /*
2939           if (vptr->wol_opts & VELOCITY_WOL_PHY)
2940           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2941         */
2942
2943        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2944                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2945
2946        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2947                struct arp_packet *arp = (struct arp_packet *) buf;
2948                u16 crc;
2949                memset(buf, 0, sizeof(struct arp_packet) + 7);
2950
2951                for (i = 0; i < 4; i++)
2952                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2953
2954                arp->type = htons(ETH_P_ARP);
2955                arp->ar_op = htons(1);
2956
2957                memcpy(arp->ar_tip, vptr->ip_addr, 4);
2958
2959                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2960                                (u8 *)&mask_pattern[0][0]);
2961
2962                writew(crc, &regs->PatternCRC[0]);
2963                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2964        }
2965
2966        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2967        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2968
2969        writew(0x0FFF, &regs->WOLSRClr);
2970
2971        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2972                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2973                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2974
2975                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2976        }
2977
2978        if (vptr->mii_status & VELOCITY_SPEED_1000)
2979                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2980
2981        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2982
2983        {
2984                u8 GCR;
2985                GCR = readb(&regs->CHIPGCR);
2986                GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2987                writeb(GCR, &regs->CHIPGCR);
2988        }
2989
2990        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2991        /* Turn on SWPTAG just before entering power mode */
2992        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2993        /* Go to bed ..... */
2994        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2995
2996        return 0;
2997}
2998
2999/**
3000 *      velocity_save_context   -       save registers
3001 *      @vptr: velocity
3002 *      @context: buffer for stored context
3003 *
3004 *      Retrieve the current configuration from the velocity hardware
3005 *      and stash it in the context structure, for use by the context
3006 *      restore functions. This allows us to save things we need across
3007 *      power down states
3008 */
3009static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3010{
3011        struct mac_regs __iomem *regs = vptr->mac_regs;
3012        u16 i;
3013        u8 __iomem *ptr = (u8 __iomem *)regs;
3014
3015        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3016                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3017
3018        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3019                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3020
3021        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3022                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3023
3024}
3025
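/**
 *      velocity_suspend        -       suspend callback
 *      @pdev: PCI device to suspend
 *      @state: power state being entered
 *
 *      Detach the interface and save the hardware context, then either
 *      arm wake-on-lan and enter D3hot, or simply power the device
 *      down, depending on whether WOL is enabled.
 */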
3026static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3027{
3028        struct net_device *dev = pci_get_drvdata(pdev);
3029        struct velocity_info *vptr = netdev_priv(dev);
3030        unsigned long flags;
3031
3032        if (!netif_running(vptr->dev))
3033                return 0;
3034
3035        netif_device_detach(vptr->dev);
3036
3037        spin_lock_irqsave(&vptr->lock, flags);
3038        pci_save_state(pdev);
3039#ifdef ETHTOOL_GWOL
3040        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3041                velocity_get_ip(vptr);
3042                velocity_save_context(vptr, &vptr->context);
3043                velocity_shutdown(vptr);
3044                velocity_set_wol(vptr);
3045                pci_enable_wake(pdev, PCI_D3hot, 1);
3046                pci_set_power_state(pdev, PCI_D3hot);
3047        } else {
3048                velocity_save_context(vptr, &vptr->context);
3049                velocity_shutdown(vptr);
3050                pci_disable_device(pdev);
3051                pci_set_power_state(pdev, pci_choose_state(pdev, state));
3052        }
3053#else
3054        pci_set_power_state(pdev, pci_choose_state(pdev, state));
3055#endif
3056        spin_unlock_irqrestore(&vptr->lock, flags);
3057        return 0;
3058}
3059
3060/**
3061 *      velocity_restore_context        -       restore registers
3062 *      @vptr: velocity
3063 *      @context: buffer containing the stored context
3064 *
3065 *      Reload the register configuration from the velocity context
3066 *      created by velocity_save_context.
3067 */
3068static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3069{
3070        struct mac_regs __iomem *regs = vptr->mac_regs;
3071        int i;
3072        u8 __iomem *ptr = (u8 __iomem *)regs;
3073
3074        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3075                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3076
3077        /* Just skip cr0: CR1..CR3 are restored via their paired SET/CLR registers */
3078        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3079                /* Clear the bits absent from the saved value (CLR bank is at +4)... */
3080                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3081                /* ...then assert the saved bits */
3082                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3083        }
3084
3085        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3086                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3087
3088        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3089                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3090
3091        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3092                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3093}
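
/*
 * A minimal sketch of the SET/CLR idiom used above, assuming a register
 * pair where a 1 written to the SET address asserts a bit and a 1
 * written to the CLR address (SET + 4) deasserts it:
 *
 *	writeb(~saved, clr);	 every bit not in 'saved' goes low
 *	writeb(saved, set);	 every bit in 'saved' goes high
 *
 * The register thus reaches exactly 'saved' without a read-modify-write
 * cycle on live hardware.
 */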
3094
3095static int velocity_resume(struct pci_dev *pdev)
3096{
3097        struct net_device *dev = pci_get_drvdata(pdev);
3098        struct velocity_info *vptr = netdev_priv(dev);
3099        unsigned long flags;
3100        int i;
3101
3102        if (!netif_running(vptr->dev))
3103                return 0;
3104
3105        pci_set_power_state(pdev, PCI_D0);
3106        pci_enable_wake(pdev, PCI_D0, 0);
3107        pci_restore_state(pdev);
3108
3109        mac_wol_reset(vptr->mac_regs);
3110
3111        spin_lock_irqsave(&vptr->lock, flags);
3112        velocity_restore_context(vptr, &vptr->context);
3113        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3114        mac_disable_int(vptr->mac_regs);
3115
3116        velocity_tx_srv(vptr);
3117
3118        for (i = 0; i < vptr->tx.numq; i++) {
3119                if (vptr->tx.used[i])
3120                        mac_tx_queue_wake(vptr->mac_regs, i);
3121        }
3122
3123        mac_enable_int(vptr->mac_regs);
3124        spin_unlock_irqrestore(&vptr->lock, flags);
3125        netif_device_attach(vptr->dev);
3126
3127        return 0;
3128}
3129#endif
3130
3131/*
3132 *      Definition of our device driver. The PCI layer
3133 *      uses this to handle all card discovery and hotplugging.
3134 */
3135static struct pci_driver velocity_driver = {
3136        .name     = VELOCITY_NAME,
3137        .id_table = velocity_id_table,
3138        .probe    = velocity_found1,
3139        .remove   = __devexit_p(velocity_remove1),
3140#ifdef CONFIG_PM
3141        .suspend  = velocity_suspend,
3142        .resume   = velocity_resume,
3143#endif
3144};
3145
3146
3147/**
3148 *      velocity_ethtool_up     -       pre hook for ethtool
3149 *      @dev: network device
3150 *
3151 *      Called before an ethtool operation. We need to make sure the
3152 *      chip is out of D3 state before we poke at it.
3153 */
3154static int velocity_ethtool_up(struct net_device *dev)
3155{
3156        struct velocity_info *vptr = netdev_priv(dev);
3157        if (!netif_running(dev))
3158                pci_set_power_state(vptr->pdev, PCI_D0);
3159        return 0;
3160}
3161
3162/**
3163 *      velocity_ethtool_down   -       post hook for ethtool
3164 *      @dev: network device
3165 *
3166 *      Called after an ethtool operation. Put the chip back into D3
3167 *      state if the interface isn't running.
3168 */
3169static void velocity_ethtool_down(struct net_device *dev)
3170{
3171        struct velocity_info *vptr = netdev_priv(dev);
3172        if (!netif_running(dev))
3173                pci_set_power_state(vptr->pdev, PCI_D3hot);
3174}
3175
3176static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3177{
3178        struct velocity_info *vptr = netdev_priv(dev);
3179        struct mac_regs __iomem *regs = vptr->mac_regs;
3180        u32 status;
3181        status = check_connection_type(vptr->mac_regs);
3182
3183        cmd->supported = SUPPORTED_TP |
3184                        SUPPORTED_Autoneg |
3185                        SUPPORTED_10baseT_Half |
3186                        SUPPORTED_10baseT_Full |
3187                        SUPPORTED_100baseT_Half |
3188                        SUPPORTED_100baseT_Full |
3189                        SUPPORTED_1000baseT_Half |
3190                        SUPPORTED_1000baseT_Full;
3191
3192        cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3193        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3194                cmd->advertising |=
3195                        ADVERTISED_10baseT_Half |
3196                        ADVERTISED_10baseT_Full |
3197                        ADVERTISED_100baseT_Half |
3198                        ADVERTISED_100baseT_Full |
3199                        ADVERTISED_1000baseT_Half |
3200                        ADVERTISED_1000baseT_Full;
3201        } else {
3202                switch (vptr->options.spd_dpx) {
3203                case SPD_DPX_1000_FULL:
3204                        cmd->advertising |= ADVERTISED_1000baseT_Full;
3205                        break;
3206                case SPD_DPX_100_HALF:
3207                        cmd->advertising |= ADVERTISED_100baseT_Half;
3208                        break;
3209                case SPD_DPX_100_FULL:
3210                        cmd->advertising |= ADVERTISED_100baseT_Full;
3211                        break;
3212                case SPD_DPX_10_HALF:
3213                        cmd->advertising |= ADVERTISED_10baseT_Half;
3214                        break;
3215                case SPD_DPX_10_FULL:
3216                        cmd->advertising |= ADVERTISED_10baseT_Full;
3217                        break;
3218                default:
3219                        break;
3220                }
3221        }
3222        if (status & VELOCITY_SPEED_1000)
3223                cmd->speed = SPEED_1000;
3224        else if (status & VELOCITY_SPEED_100)
3225                cmd->speed = SPEED_100;
3226        else
3227                cmd->speed = SPEED_10;
3228        cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3229        cmd->port = PORT_TP;
3230        cmd->transceiver = XCVR_INTERNAL;
3231        cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3232
3233        if (status & VELOCITY_DUPLEX_FULL)
3234                cmd->duplex = DUPLEX_FULL;
3235        else
3236                cmd->duplex = DUPLEX_HALF;
3237
3238        return 0;
3239}
3240
3241static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3242{
3243        struct velocity_info *vptr = netdev_priv(dev);
3244        u32 curr_status;
3245        u32 new_status = 0;
3246        int ret = 0;
3247
3248        curr_status = check_connection_type(vptr->mac_regs);
3249        curr_status &= (~VELOCITY_LINK_FAIL);
3250
3251        new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3252        new_status |= ((cmd->speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3253        new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3254        new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3255        new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3256
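        /*
         * An autoneg request may not simultaneously force a link mode
         * other than the one currently negotiated.
         */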
3257        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3258            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3259                ret = -EINVAL;
3260        } else {
3261                enum speed_opt spd_dpx;
3262
3263                if (new_status & VELOCITY_AUTONEG_ENABLE)
3264                        spd_dpx = SPD_DPX_AUTO;
3265                else if ((new_status & VELOCITY_SPEED_1000) &&
3266                         (new_status & VELOCITY_DUPLEX_FULL)) {
3267                        spd_dpx = SPD_DPX_1000_FULL;
3268                } else if (new_status & VELOCITY_SPEED_100)
3269                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3270                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3271                else if (new_status & VELOCITY_SPEED_10)
3272                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3273                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3274                else
3275                        return -EOPNOTSUPP;
3276
3277                vptr->options.spd_dpx = spd_dpx;
3278
3279                velocity_set_media_mode(vptr, new_status);
3280        }
3281
3282        return ret;
3283}
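
/*
 * A usage sketch, assuming the stock ethtool utility (which reads the
 * current settings back before changing them):
 *
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 *
 * arrives here with autoneg/speed/duplex filled in and is stored as
 * SPD_DPX_100_FULL, while
 *
 *	ethtool -s eth0 autoneg on
 *
 * selects SPD_DPX_AUTO and the full advertised set.
 */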
3284
3285static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3286{
3287        struct velocity_info *vptr = netdev_priv(dev);
3288        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3289        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3290        strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
3291}
3292
3293static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3294{
3295        struct velocity_info *vptr = netdev_priv(dev);
3296        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3297        wol->wolopts = WAKE_MAGIC;      /* magic packet wake is always reported */
3298        /*
3299         * if (vptr->wol_opts & VELOCITY_WOL_PHY)
3300         *         wol->wolopts |= WAKE_PHY;
3301         */
3302        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3303                wol->wolopts |= WAKE_UCAST;
3304        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3305                wol->wolopts |= WAKE_ARP;
3306        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3307}
3308
3309static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3310{
3311        struct velocity_info *vptr = netdev_priv(dev);
3312
3313        if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3314                return -EINVAL;
3315        vptr->wol_opts = VELOCITY_WOL_MAGIC;
3316
3317        /*
3318         * if (wol->wolopts & WAKE_PHY) {
3319         *         vptr->wol_opts |= VELOCITY_WOL_PHY;
3320         *         vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3321         * }
3322         */
3323
3324        if (wol->wolopts & WAKE_MAGIC) {
3325                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3326                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3327        }
3328        if (wol->wolopts & WAKE_UCAST) {
3329                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3330                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3331        }
3332        if (wol->wolopts & WAKE_ARP) {
3333                vptr->wol_opts |= VELOCITY_WOL_ARP;
3334                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3335        }
3336        memcpy(vptr->wol_passwd, wol->sopass, 6);
3337        return 0;
3338}
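
/*
 * In terms of the standard ethtool WOL letters this means, e.g.:
 *
 *	ethtool -s eth0 wol g	magic packet (always enabled here anyway)
 *	ethtool -s eth0 wol ug	magic packet plus unicast wake
 *	ethtool -s eth0 wol a	ARP wake-up
 *
 * Requests carrying none of the supported PHY/magic/unicast/ARP bits
 * are rejected above.
 */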
3339
3340static u32 velocity_get_msglevel(struct net_device *dev)
3341{
3342        return msglevel;
3343}
3344
3345static void velocity_set_msglevel(struct net_device *dev, u32 value)
3346{
3347        msglevel = value;
3348}
3349
3350static int get_pending_timer_val(int val)
3351{
3352        int mult_bits = val >> 6;
3353        int mult = 1;
3354
3355        switch (mult_bits) {
3357        case 1:
3358                mult = 4; break;
3359        case 2:
3360                mult = 16; break;
3361        case 3:
3362                mult = 64; break;
3363        case 0:
3364        default:
3365                break;
3366        }
3367
3368        return (val & 0x3f) * mult;
3369}
3370
3371static void set_pending_timer_val(int *val, u32 us)
3372{
3373        u8 mult = 0;
3374        u8 shift = 0;
3375
3376        if (us >= 0x3f) {
3377                mult = 1; /* multiply by 4 */
3378                shift = 2;
3379        }
3380        if (us >= 0x3f * 4) {
3381                mult = 2; /* multiply by 16 */
3382                shift = 4;
3383        }
3384        if (us >= 0x3f * 16) {
3385                mult = 3; /* multiply by 64 */
3386                shift = 6;
3387        }
3388
3389        *val = (mult << 6) | ((us >> shift) & 0x3f);
3390}
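
/*
 * Worked example of the encoding above: for us = 100 only the first
 * threshold (100 >= 0x3f) is met, so mult = 1 and shift = 2:
 *
 *	val = (1 << 6) | ((100 >> 2) & 0x3f) = 0x40 | 25 = 89
 *
 * and get_pending_timer_val(89) sees mult_bits = 1 (multiplier 4),
 * returning (89 & 0x3f) * 4 = 100, an exact round trip. Larger values
 * are rounded down to a multiple of the selected step.
 */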
3391
3392
3393static int velocity_get_coalesce(struct net_device *dev,
3394                struct ethtool_coalesce *ecmd)
3395{
3396        struct velocity_info *vptr = netdev_priv(dev);
3397
3398        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3399        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3400
3401        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3402        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3403
3404        return 0;
3405}
3406
3407static int velocity_set_coalesce(struct net_device *dev,
3408                struct ethtool_coalesce *ecmd)
3409{
3410        struct velocity_info *vptr = netdev_priv(dev);
3411        int max_us = 0x3f * 64;
3412        unsigned long flags;
3413
3414        /* 6-bit timer field with a multiplier of at most 64 */
3415        if (ecmd->tx_coalesce_usecs > max_us)
3416                return -EINVAL;
3417        if (ecmd->rx_coalesce_usecs > max_us)
3418                return -EINVAL;
3419
3420        if (ecmd->tx_max_coalesced_frames > 0xff)
3421                return -EINVAL;
3422        if (ecmd->rx_max_coalesced_frames > 0xff)
3423                return -EINVAL;
3424
3425        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3426        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3427
3428        set_pending_timer_val(&vptr->options.rxqueue_timer,
3429                        ecmd->rx_coalesce_usecs);
3430        set_pending_timer_val(&vptr->options.txqueue_timer,
3431                        ecmd->tx_coalesce_usecs);
3432
3433        /* Setup the interrupt suppression and queue timers */
3434        spin_lock_irqsave(&vptr->lock, flags);
3435        mac_disable_int(vptr->mac_regs);
3436        setup_adaptive_interrupts(vptr);
3437        setup_queue_timers(vptr);
3438
3439        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3440        mac_clear_isr(vptr->mac_regs);
3441        mac_enable_int(vptr->mac_regs);
3442        spin_unlock_irqrestore(&vptr->lock, flags);
3443
3444        return 0;
3445}
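
/*
 * These two handlers back the standard coalescing controls, e.g.:
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 16
 *
 * which lands here as rx_coalesce_usecs = 100 and
 * rx_max_coalesced_frames = 16. Anything beyond 0x3f * 64 usecs or 255
 * frames is refused with -EINVAL.
 */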
3446
3447static const struct ethtool_ops velocity_ethtool_ops = {
3448        .get_settings   =       velocity_get_settings,
3449        .set_settings   =       velocity_set_settings,
3450        .get_drvinfo    =       velocity_get_drvinfo,
3451        .set_tx_csum    =       ethtool_op_set_tx_csum,
3452        .get_tx_csum    =       ethtool_op_get_tx_csum,
3453        .get_wol        =       velocity_ethtool_get_wol,
3454        .set_wol        =       velocity_ethtool_set_wol,
3455        .get_msglevel   =       velocity_get_msglevel,
3456        .set_msglevel   =       velocity_set_msglevel,
3457        .set_sg         =       ethtool_op_set_sg,
3458        .get_link       =       velocity_get_link,
3459        .get_coalesce   =       velocity_get_coalesce,
3460        .set_coalesce   =       velocity_set_coalesce,
3461        .begin          =       velocity_ethtool_up,
3462        .complete       =       velocity_ethtool_down
3463};
3464
3465#ifdef CONFIG_PM
3466#ifdef CONFIG_INET
3467static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3468{
3469        struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3470        struct net_device *dev = ifa->ifa_dev->dev;
3471
3472        if (dev_net(dev) == &init_net &&
3473            dev->netdev_ops == &velocity_netdev_ops)
3474                velocity_get_ip(netdev_priv(dev));
3475
3476        return NOTIFY_DONE;
3477}
3478#endif  /* CONFIG_INET */
3479#endif  /* CONFIG_PM */
3480
3481#if defined(CONFIG_PM) && defined(CONFIG_INET)
3482static struct notifier_block velocity_inetaddr_notifier = {
3483        .notifier_call  = velocity_netdev_event,
3484};
3485
3486static void velocity_register_notifier(void)
3487{
3488        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3489}
3490
3491static void velocity_unregister_notifier(void)
3492{
3493        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3494}
3495
3496#else
3497
3498#define velocity_register_notifier()    do {} while (0)
3499#define velocity_unregister_notifier()  do {} while (0)
3500
3501#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
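
/*
 * The inetaddr notifier keeps the IP address cached by velocity_get_ip()
 * fresh for the ARP wake-up pattern built in velocity_set_wol(), which
 * is why it is only registered when both power management and IPv4
 * support are configured in.
 */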
3502
3503/**
3504 *      velocity_init_module    -       load time function
3505 *
3506 *      Called when the velocity module is loaded. The PCI driver
3507 *      is registered with the PCI layer, which in turn calls
3508 *      the probe function for each velocity adapter installed
3509 *      in the system.
3510 */
3511static int __init velocity_init_module(void)
3512{
3513        int ret;
3514
3515        velocity_register_notifier();
3516        ret = pci_register_driver(&velocity_driver);
3517        if (ret < 0)
3518                velocity_unregister_notifier();
3519        return ret;
3520}
3521
3522/**
3523 *      velocity_cleanup_module -       module unload
3524 *
3525 *      Called when the velocity module is unloaded. It unregisters
3526 *      the inetaddr notifier and the PCI driver interface for this
3527 *      hardware, which in turn tears down all discovered interfaces
3528 *      before returning.
3529 */
3530static void __exit velocity_cleanup_module(void)
3531{
3532        velocity_unregister_notifier();
3533        pci_unregister_driver(&velocity_driver);
3534}
3535
3536module_init(velocity_init_module);
3537module_exit(velocity_cleanup_module);
3538