linux/drivers/net/ethernet/via/via-velocity.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *      rx_copybreak/alignment
 *      More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/inetdevice.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"

enum velocity_bus_type {
        BUS_PCI,
        BUS_PLATFORM,
};

static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

static void velocity_set_power_state(struct velocity_info *vptr, char state)
{
        void *addr = vptr->mac_regs;

        if (vptr->pdev)
                pci_set_power_state(vptr->pdev, state);
        else
                writeb(state, addr + 0x154);
}

/**
 *      mac_get_cam_mask        -       Read a CAM mask
 *      @regs: register block for this velocity
 *      @mask: buffer to store mask
 *
 *      Fetch the mask bits of the selected CAM and store them into the
 *      provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam_mask        -       Set a CAM mask
 *      @regs: register block for this velocity
 *      @mask: CAM mask to load
 *
 *      Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam     -       set CAM data
 *      @regs: register block of this velocity
 *      @idx: CAM index
 *      @addr: 2 or 6 bytes of CAM data
 *
 *      Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

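        /* Clamp the index to the 64-entry CAM range, e.g. idx 65 masks to 1 */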
        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *      mac_wol_reset   -       reset WOL after exiting low power
 *      @regs: register block of this velocity
 *
 *      Called after we drop out of wake on lan mode in order to
 *      reset the Wake on Lan features. This function doesn't restore
 *      the rest of the logic affected by the sleep/wakeup transition.
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);
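
/*
 * For illustration: an invocation such as
 * VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors")
 * expands to the following, with MAX_UNITS and OPTION_DEFAULT coming
 * from via-velocity.h:
 *
 *      static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      module_param_array(RxDescriptors, int, NULL, 0);
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 */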

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for controlling IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
      In some environments the IP header must be DWORD byte aligned,
      or the packet will be dropped when we receive it. (e.g. IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of the NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode
   5: indicates 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to the force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
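
/*
 * For example, wol_opts=6 (2 + 4) wakes the machine on either an arp
 * packet or any unicast packet.
 */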

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
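
/*
 * A hypothetical module load overriding options for two adapters
 * (module_param_array takes comma-separated per-unit values):
 *
 *      modprobe via-velocity RxDescriptors=128,128 wol_opts=1 rx_copybreak=256
 */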

/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */

static const struct pci_device_id velocity_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);

/*
 *      Describe the OF device identifiers that we support in this
 *      device driver. Used for devicetree nodes.
 */
static const struct of_device_id velocity_of_ids[] = {
        { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, velocity_of_ids);

/**
 *      get_chip_name   -       identifier to name
 *      @chip_id: chip identifier
 *
 *      Given a chip identifier return a suitable description. Returns
 *      a pointer to a static string valid while the driver is loaded.
 */
static const char *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 *      velocity_set_int_opt    -       parser for integer options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @min: lowest value allowed
 *      @max: highest value allowed
 *      @def: default value
 *      @name: property name
 *      @devname: device name
 *
 *      Set an integer property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
                                 char *name, const char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                                        devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                                        devname, name, val);
                *opt = val;
        }
}

/**
 *      velocity_set_bool_opt   -       parser for boolean options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @def: default value (yes/no)
 *      @flag: numeric value to set for true.
 *      @name: property name
 *      @devname: device name
 *
 *      Set a boolean property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
                                  char *name, const char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 *      velocity_get_options    -       set options on device
 *      @opts: option structure for the device
 *      @index: index of option to use in module options array
 *      @devname: device name
 *
 *      Turn the module and command options into a single structure
 *      for the current device
 */
static void velocity_get_options(struct velocity_opt *opts, int index,
                                 const char *devname)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
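        /*
         * Round the RX descriptor count down to a multiple of 4, e.g.
         * 255 -> 252: descriptors are handed back to the NIC in blocks
         * of four (see velocity_give_many_rx_descs()).
         */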
        opts->numrx = (opts->numrx & ~3);
}

/**
 *      velocity_init_cam_filter        -       initialise CAM
 *      @vptr: velocity to program
 *
 *      Initialize the content addressable memory used for filters. Load
 *      appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        unsigned int vid, i = 0;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
                mac_set_vlan_cam(regs, i, (u8 *) &vid);
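                /*
                 * Bit i of the 8-byte mask enables VCAM entry i,
                 * e.g. i = 10 sets bit 2 of mask byte 1.
                 */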
                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                if (++i >= VCAM_SIZE)
                        break;
        }
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}

static int velocity_vlan_rx_add_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static int velocity_vlan_rx_kill_vid(struct net_device *dev,
                                     __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *      velocity_rx_reset       -       handle a receive reset
 *      @vptr: velocity we are resetting
 *
 *      Reset the ownership and status for the receive ring side.
 *      Hand the whole receive queue back to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         *      Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *      velocity_get_opt_media_mode     -       get media selection
 *      @vptr: velocity adapter
 *
 *      Get the media mode stored in EEPROM or module options and load
 *      mii_status accordingly. The requested link state information
 *      is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        case SPD_DPX_1000_FULL:
                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 *      safe_disable_mii_autopoll       -       autopoll off
 *      @regs: velocity registers
 *
 *      Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /*  turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      enable_mii_autopoll     -       turn on autopolling
 *      @regs: velocity registers
 *
 *      Enable the MII link status autopoll feature on the Velocity
 *      hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      velocity_mii_read       -       read MII data
 *      @regs: velocity registers
 *      @index: MII register index
 *      @data: buffer for received data
 *
 *      Perform a single read of an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 *      mii_check_media_mode    -       check media state
 *      @regs: velocity registers
 *
 *      Check the current MII status and determine the link status
 *      accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_mii_write      -       write MII data
 *      @regs: velocity registers
 *      @mii_addr: MII register index
 *      @data: 16bit data for the MII register
 *
 *      Perform a single write to an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 *      set_mii_flow_control    -       flow control setup
 *      @vptr: velocity interface
 *
 *      Set up the flow control on this interface according to
 *      the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
        }
}

/**
 *      mii_set_auto_on         -       autonegotiate on
 *      @vptr: velocity
 *
 *      Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;
        PHYSR0 = readb(&regs->PHYSR0);

        /*
           if (!(PHYSR0 & PHYSR0_LINKGD))
           status|=VELOCITY_LINK_FAIL;
         */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_set_media_mode         -       set media mode
 *      @vptr: velocity adapter
 *      @mii_status: old MII link state
 *
 *      Check the media link state and configure the flow control,
 *      the PHY and the velocity hardware accordingly. In particular
 *      we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        u32 curr_status;
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        /*
           Check if new status is consistent with current status
           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
               (mii_status==curr_status)) {
           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
           vptr->mii_status=check_connection_type(vptr->mac_regs);
           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
           return 0;
           }
         */

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

        /*
         *      If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in halfduplex mode
                 *    and enable it in fullduplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);

                if (mii_status & VELOCITY_SPEED_1000)
                        CHIPGCR |= CHIPGCR_FCGMII;
                else
                        CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
                if ((mii_status & VELOCITY_SPEED_1000) &&
                    (mii_status & VELOCITY_DUPLEX_FULL)) {
                        CTRL1000 |= ADVERTISE_1000FULL;
                }
                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
                } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
                                ANAR |= ADVERTISE_10HALF;
                }
                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 *      velocity_print_link_status      -       link status reporting
 *      @vptr: velocity to report on
 *
 *      Turn the link status of the velocity card into a kernel log
 *      description of the new link state, detailing speed and duplex
 *      status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
                        break;
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                default:
                        break;
                }
        }
}

/**
 *      enable_flow_control_ability     -       flow control
 *      @vptr: velocity to configure
 *
 *      Set up flow control according to the flow control options
 *      determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {

        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }

}

/**
 *      velocity_soft_reset     -       soft reset
 *      @vptr: velocity to reset
 *
 *      Kick off a soft reset of the velocity adapter and then poll
 *      until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 *      velocity_set_multi      -       filter list change callback
 *      @dev: network device
 *
 *      Called by the network layer when the filter lists need to change
 *      for a velocity adapter. Reload the CAMs with the new address
 *      filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask);

                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/*
 * MII access, media link mode setting functions
 */

/**
 *      mii_init        -       set up MII
 *      @vptr: velocity adapter
 *      @mii_status: link status
 *
 *      Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_ICPLUS_IP101A:
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
                                                MII_ADVERTISE, vptr->mac_regs);
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
                                                                vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
                                                                vptr->mac_regs);
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on the ECHODIS bit in NWay-forced full mode and
                 *      turn it off in NWay-forced half mode to work around
                 *      the NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on the ECHODIS bit in NWay-forced full mode and
                 *      turn it off in NWay-forced half mode to work around
                 *      the NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *      Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
        if (BMCR & BMCR_ISOLATE) {
                BMCR &= ~BMCR_ISOLATE;
                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers   -       Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}

/**
 *      velocity_init_registers -       initialise MAC registers
 *      @vptr: velocity to init
 *      @type: type of initialisation (hot or cold)
 *
 *      Initialise the MAC on a reset or on first set up on the
 *      hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        struct net_device *netdev = vptr->netdev;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(netdev);

                /*
                 *      Reset RX so the RX pointer stays on a 4X
                 *      (multiple of four) descriptor boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(netdev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                                                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *      Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                if (!vptr->no_eeprom) {
                        mac_eeprom_reload(regs);
                        for (i = 0; i < 6; i++)
                                writeb(netdev->dev_addr[i], regs->PAR + i);
                }

                /*
                 *      clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *      Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *      Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(netdev);

                /*
                 *      Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(netdev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(netdev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * The number of RDs handed back must be a multiple of 4 per
         * the hardware spec (programming guide rev 1.20, p.13)
         */
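        /*
         * e.g. with 7 descriptors filled, only 4 are returned to the
         * NIC now; the remaining 3 wait for the next refill.
         */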
        if (vptr->rx.filled < 4)
                return;

        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}

/**
 *      velocity_init_dma_rings -       set up DMA rings
 *      @vptr: Velocity to set up
 *
 *      Allocate DMA coherent rings for the receive and transmit layer
 *      to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
        struct velocity_opt *opt = &vptr->options;
        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
        dma_addr_t pool_dma;
        void *pool;
        unsigned int i;

        /*
         * Allocate all RD/TD rings in a single pool.
         *
         * dma_alloc_coherent() fulfills the requirement for 64 byte
         * alignment
         */
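        /*
         * Resulting pool layout (one contiguous allocation):
         *
         *      [ RX ring ][ TX ring 0 ] ... [ TX ring tx.numq - 1 ]
         */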
1475        pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1476                                    rx_ring_size, &pool_dma, GFP_ATOMIC);
1477        if (!pool) {
1478                dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1479                        vptr->netdev->name);
1480                return -ENOMEM;
1481        }
1482
1483        vptr->rx.ring = pool;
1484        vptr->rx.pool_dma = pool_dma;
1485
1486        pool += rx_ring_size;
1487        pool_dma += rx_ring_size;
1488
1489        for (i = 0; i < vptr->tx.numq; i++) {
1490                vptr->tx.rings[i] = pool;
1491                vptr->tx.pool_dma[i] = pool_dma;
1492                pool += tx_ring_size;
1493                pool_dma += tx_ring_size;
1494        }
1495
1496        return 0;
1497}
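
/*
 * Resulting pool layout (illustrative sketch): the single
 * dma_alloc_coherent() call above carves out, in order,
 *
 *	pool_dma + 0                           : RX ring
 *	pool_dma + rx_ring_size                : TX ring 0
 *	pool_dma + rx_ring_size + tx_ring_size : TX ring 1 (if tx.numq > 1)
 *
 * which is why velocity_free_dma_rings() below can release everything
 * with one dma_free_coherent() call on the RX base address.
 */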
1498
1499static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1500{
1501        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1502}
1503
1504/**
1505 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1506 *      @vptr: velocity
1507 *      @idx: ring index
1508 *
1509 *      Allocate a new full sized buffer for the reception of a frame and
1510 *      map it into PCI space for the hardware to use. The hardware
1511 *      requires *64* byte alignment of the buffer which makes life
1512 *      less fun than would be ideal.
1513 */
1514static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1515{
1516        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1517        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1518
1519        rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1520        if (rd_info->skb == NULL)
1521                return -ENOMEM;
1522
1523        /*
1524         *      Do the gymnastics to get the buffer head for data at
1525         *      64byte alignment.
1526         */
1527        skb_reserve(rd_info->skb,
1528                        64 - ((unsigned long) rd_info->skb->data & 63));
1529        rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1530                                        vptr->rx.buf_sz, DMA_FROM_DEVICE);
1531
1532        /*
1533         *      Fill in the descriptor to match
1534         */
1535
1536        *((u32 *)&rd->rdesc0) = 0;
1537        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1538        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1539        rd->pa_high = 0;
1540        return 0;
1541}
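
/*
 * Alignment sketch for the skb_reserve() above (illustrative numbers):
 * if skb->data lands at 0x...1238, then 0x1238 & 63 = 56 and the
 * reserve of 64 - 56 = 8 bytes moves data up to 0x...1240, a 64-byte
 * boundary. When data is already aligned the reserve is a full 64
 * bytes, which is why the skb is allocated buf_sz + 64 bytes long.
 */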
1542
1543
1544static int velocity_rx_refill(struct velocity_info *vptr)
1545{
1546        int dirty = vptr->rx.dirty, done = 0;
1547
1548        do {
1549                struct rx_desc *rd = vptr->rx.ring + dirty;
1550
1551                /* Fine for an all zero Rx desc at init time as well */
1552                if (rd->rdesc0.len & OWNED_BY_NIC)
1553                        break;
1554
1555                if (!vptr->rx.info[dirty].skb) {
1556                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1557                                break;
1558                }
1559                done++;
1560                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1561        } while (dirty != vptr->rx.curr);
1562
1563        if (done) {
1564                vptr->rx.dirty = dirty;
1565                vptr->rx.filled += done;
1566        }
1567
1568        return done;
1569}
1570
1571/**
1572 *      velocity_free_rd_ring   -       free receive ring
1573 *      @vptr: velocity to clean up
1574 *
1575 *      Free the receive buffers for each ring slot and any
1576 *      attached socket buffers that need to go away.
1577 */
1578static void velocity_free_rd_ring(struct velocity_info *vptr)
1579{
1580        int i;
1581
1582        if (vptr->rx.info == NULL)
1583                return;
1584
1585        for (i = 0; i < vptr->options.numrx; i++) {
1586                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1587                struct rx_desc *rd = vptr->rx.ring + i;
1588
1589                memset(rd, 0, sizeof(*rd));
1590
1591                if (!rd_info->skb)
1592                        continue;
1593                dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1594                                 DMA_FROM_DEVICE);
1595                rd_info->skb_dma = 0;
1596
1597                dev_kfree_skb(rd_info->skb);
1598                rd_info->skb = NULL;
1599        }
1600
1601        kfree(vptr->rx.info);
1602        vptr->rx.info = NULL;
1603}
1604
1605/**
1606 *      velocity_init_rd_ring   -       set up receive ring
1607 *      @vptr: velocity to configure
1608 *
1609 *      Allocate and set up the receive buffers for each ring slot and
1610 *      assign them to the network adapter.
1611 */
1612static int velocity_init_rd_ring(struct velocity_info *vptr)
1613{
1614        int ret = -ENOMEM;
1615
1616        vptr->rx.info = kcalloc(vptr->options.numrx,
1617                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1618        if (!vptr->rx.info)
1619                goto out;
1620
1621        velocity_init_rx_ring_indexes(vptr);
1622
1623        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1624                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1625                        "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
1626                velocity_free_rd_ring(vptr);
1627                goto out;
1628        }
1629
1630        ret = 0;
1631out:
1632        return ret;
1633}
1634
1635/**
1636 *      velocity_init_td_ring   -       set up transmit ring
1637 *      @vptr:  velocity
1638 *
1639 *      Set up the transmit ring and chain the ring pointers together.
1640 *      Returns zero on success or a negative posix errno code for
1641 *      failure.
1642 */
1643static int velocity_init_td_ring(struct velocity_info *vptr)
1644{
1645        int j;
1646
1647        /* Init the TD ring entries */
1648        for (j = 0; j < vptr->tx.numq; j++) {
1649
1650                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1651                                            sizeof(struct velocity_td_info),
1652                                            GFP_KERNEL);
1653                if (!vptr->tx.infos[j]) {
1654                        while (--j >= 0)
1655                                kfree(vptr->tx.infos[j]);
1656                        return -ENOMEM;
1657                }
1658
1659                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1660        }
1661        return 0;
1662}
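
/*
 * The "while (--j >= 0)" above is the standard partial-allocation
 * rollback idiom; a minimal standalone sketch (hypothetical names):
 *
 *	for (j = 0; j < n; j++) {
 *		tab[j] = kcalloc(count, size, GFP_KERNEL);
 *		if (!tab[j]) {
 *			while (--j >= 0)
 *				kfree(tab[j]);
 *			return -ENOMEM;
 *		}
 *	}
 *	return 0;
 *
 * Only the entries that were actually allocated get freed, and
 * kcalloc() guarantees each per-queue info array starts zeroed.
 */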
1663
1664/**
1665 *      velocity_free_dma_rings -       free PCI ring pointers
1666 *      @vptr: Velocity to free from
1667 *
1668 *      Clean up the PCI ring buffers allocated to this velocity.
1669 */
1670static void velocity_free_dma_rings(struct velocity_info *vptr)
1671{
1672        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1673                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1674
1675        dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1676}
1677
1678static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1679{
1680        int ret;
1681
1682        velocity_set_rxbufsize(vptr, mtu);
1683
1684        ret = velocity_init_dma_rings(vptr);
1685        if (ret < 0)
1686                goto out;
1687
1688        ret = velocity_init_rd_ring(vptr);
1689        if (ret < 0)
1690                goto err_free_dma_rings_0;
1691
1692        ret = velocity_init_td_ring(vptr);
1693        if (ret < 0)
1694                goto err_free_rd_ring_1;
1695out:
1696        return ret;
1697
1698err_free_rd_ring_1:
1699        velocity_free_rd_ring(vptr);
1700err_free_dma_rings_0:
1701        velocity_free_dma_rings(vptr);
1702        goto out;
1703}
1704
1705/**
1706 *      velocity_free_tx_buf    -       free transmit buffer
1707 *      @vptr: velocity
1708 *      @tdinfo: buffer info block
1709 *      @td: transmit descriptor
1710 *
1711 *      Release a transmit buffer: recycle it if preallocated, else unmap it.
1712 */
1713static void velocity_free_tx_buf(struct velocity_info *vptr,
1714                struct velocity_td_info *tdinfo, struct tx_desc *td)
1715{
1716        struct sk_buff *skb = tdinfo->skb;
1717        int i;
1718
1719        /*
1720         *      Don't unmap the pre-allocated tx_bufs
1721         */
1722        for (i = 0; i < tdinfo->nskb_dma; i++) {
1723                size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1724
1725                /* For scatter-gather */
1726                if (skb_shinfo(skb)->nr_frags > 0)
1727                        pktlen = max_t(size_t, pktlen,
1728                                       le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
1729
1730                dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1731                                 pktlen, DMA_TO_DEVICE);
1732        }
1733        dev_consume_skb_irq(skb);
1734        tdinfo->skb = NULL;
1735}
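
/*
 * Note on the unmap length above: the DMA API requires the length given
 * to dma_unmap_single() to match the mapped length, and velocity_xmit()
 * maps max(skb->len, ETH_ZLEN) for a linear skb but per-buffer lengths
 * for scatter-gather, hence the recomputation from the descriptor's
 * td_buf sizes rather than trusting skb->len alone.
 */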
1736
1737/*
1738 *      FIXME: could we merge this with velocity_free_tx_buf ?
1739 */
1740static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1741                                                         int q, int n)
1742{
1743        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1744        int i;
1745
1746        if (td_info == NULL)
1747                return;
1748
1749        if (td_info->skb) {
1750                for (i = 0; i < td_info->nskb_dma; i++) {
1751                        if (td_info->skb_dma[i]) {
1752                                dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1753                                        td_info->skb->len, DMA_TO_DEVICE);
1754                                td_info->skb_dma[i] = 0;
1755                        }
1756                }
1757                dev_kfree_skb(td_info->skb);
1758                td_info->skb = NULL;
1759        }
1760}
1761
1762/**
1763 *      velocity_free_td_ring   -       free td ring
1764 *      @vptr: velocity
1765 *
1766 *      Free up the transmit ring for this particular velocity adapter.
1767 *      We free the ring contents but not the ring itself.
1768 */
1769static void velocity_free_td_ring(struct velocity_info *vptr)
1770{
1771        int i, j;
1772
1773        for (j = 0; j < vptr->tx.numq; j++) {
1774                if (vptr->tx.infos[j] == NULL)
1775                        continue;
1776                for (i = 0; i < vptr->options.numtx; i++)
1777                        velocity_free_td_ring_entry(vptr, j, i);
1778
1779                kfree(vptr->tx.infos[j]);
1780                vptr->tx.infos[j] = NULL;
1781        }
1782}
1783
1784static void velocity_free_rings(struct velocity_info *vptr)
1785{
1786        velocity_free_td_ring(vptr);
1787        velocity_free_rd_ring(vptr);
1788        velocity_free_dma_rings(vptr);
1789}
1790
1791/**
1792 *      velocity_error  -       handle error from controller
1793 *      @vptr: velocity
1794 *      @status: card status
1795 *
1796 *      Process an error report from the hardware and attempt to recover
1797 *      the card itself. At the moment we cannot recover from some
1798 *      theoretically impossible errors but this could be fixed using
1799 *      the pci_device_failed logic to bounce the hardware.
1800 *
1801 */
1802static void velocity_error(struct velocity_info *vptr, int status)
1803{
1804
1805        if (status & ISR_TXSTLI) {
1806                struct mac_regs __iomem *regs = vptr->mac_regs;
1807
1808                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1809                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1810                writew(TRDCSR_RUN, &regs->TDCSRClr);
1811                netif_stop_queue(vptr->netdev);
1812
1813                /* FIXME: port over the pci_device_failed code and use it
1814                   here */
1815        }
1816
1817        if (status & ISR_SRCI) {
1818                struct mac_regs __iomem *regs = vptr->mac_regs;
1819                int linked;
1820
1821                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1822                        vptr->mii_status = check_connection_type(regs);
1823
1824                        /*
1825                         *      If it is a 3119, disable frame bursting in
1826                         *      half-duplex mode and enable it in full-duplex
1827                         *      mode.
1828                         */
1829                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1830                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1831                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1832                                else
1833                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1834                        }
1835                        /*
1836                         *      Only enable CD heart beat counter in 10HD mode
1837                         */
1838                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1839                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1840                        else
1841                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1842
1843                        setup_queue_timers(vptr);
1844                }
1845                /*
1846                 *      Get link status from PHYSR0
1847                 */
1848                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1849
1850                if (linked) {
1851                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1852                        netif_carrier_on(vptr->netdev);
1853                } else {
1854                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1855                        netif_carrier_off(vptr->netdev);
1856                }
1857
1858                velocity_print_link_status(vptr);
1859                enable_flow_control_ability(vptr);
1860
1861                /*
1862                 *      Re-enable auto-polling because SRCI will disable
1863                 *      auto-polling
1864                 */
1865
1866                enable_mii_autopoll(regs);
1867
1868                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1869                        netif_stop_queue(vptr->netdev);
1870                else
1871                        netif_wake_queue(vptr->netdev);
1872
1873        }
1874        if (status & ISR_MIBFI)
1875                velocity_update_hw_mibs(vptr);
1876        if (status & ISR_LSTEI)
1877                mac_rx_queue_wake(vptr->mac_regs);
1878}
1879
1880/**
1881 *      velocity_tx_srv         -       transmit interrupt service
1882 *      @vptr: Velocity
1883 *
1884 *      Scan the queues looking for transmitted packets that
1885 *      we can complete and clean up. Update any statistics as
1886 *      necessary.
1887 */
1888static int velocity_tx_srv(struct velocity_info *vptr)
1889{
1890        struct tx_desc *td;
1891        int qnum;
1892        int full = 0;
1893        int idx;
1894        int works = 0;
1895        struct velocity_td_info *tdinfo;
1896        struct net_device_stats *stats = &vptr->netdev->stats;
1897
1898        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1899                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1900                        idx = (idx + 1) % vptr->options.numtx) {
1901
1902                        /*
1903                         *      Get Tx Descriptor
1904                         */
1905                        td = &(vptr->tx.rings[qnum][idx]);
1906                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1907
1908                        if (td->tdesc0.len & OWNED_BY_NIC)
1909                                break;
1910
1911                        if ((works++ > 15))
1912                                break;
1913
1914                        if (td->tdesc0.TSR & TSR0_TERR) {
1915                                stats->tx_errors++;
1916                                stats->tx_dropped++;
1917                                if (td->tdesc0.TSR & TSR0_CDH)
1918                                        stats->tx_heartbeat_errors++;
1919                                if (td->tdesc0.TSR & TSR0_CRS)
1920                                        stats->tx_carrier_errors++;
1921                                if (td->tdesc0.TSR & TSR0_ABT)
1922                                        stats->tx_aborted_errors++;
1923                                if (td->tdesc0.TSR & TSR0_OWC)
1924                                        stats->tx_window_errors++;
1925                        } else {
1926                                stats->tx_packets++;
1927                                stats->tx_bytes += tdinfo->skb->len;
1928                        }
1929                        velocity_free_tx_buf(vptr, tdinfo, td);
1930                        vptr->tx.used[qnum]--;
1931                }
1932                vptr->tx.tail[qnum] = idx;
1933
1934                if (AVAIL_TD(vptr, qnum) < 1)
1935                        full = 1;
1936        }
1937        /*
1938         *      Look to see if we should kick the transmit network
1939         *      layer for more work.
1940         */
1941        if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1942            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1943                netif_wake_queue(vptr->netdev);
1944        }
1945        return works;
1946}
1947
1948/**
1949 *      velocity_rx_csum        -       checksum process
1950 *      @rd: receive packet descriptor
1951 *      @skb: network layer packet buffer
1952 *
1953 *      Process the status bits for the received packet and determine
1954 *      if the checksum was computed and verified by the hardware
1955 */
1956static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1957{
1958        skb_checksum_none_assert(skb);
1959
1960        if (rd->rdesc1.CSM & CSM_IPKT) {
1961                if (rd->rdesc1.CSM & CSM_IPOK) {
1962                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1963                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1964                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1965                                        return;
1966                        }
1967                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1968                }
1969        }
1970}
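
/*
 * Decision sketch for the CSM bits above: CHECKSUM_UNNECESSARY is
 * claimed only when the frame is IP (CSM_IPKT) with a good IP header
 * checksum (CSM_IPOK) and, if it is also TCP or UDP, a verified
 * transport checksum (CSM_TUPOK). Every other combination leaves the
 * skb at CHECKSUM_NONE so the stack re-verifies in software.
 */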
1971
1972/**
1973 *      velocity_rx_copy        -       in place Rx copy for small packets
1974 *      @rx_skb: network layer packet buffer candidate
1975 *      @pkt_size: received data size
1976 *      @vptr: velocity we are handling
1977 *
1978 *      Replace the current skb that is scheduled for Rx processing by a
1979 *      shorter, immediately allocated skb, if the received packet is small
1980 *      enough. On success *rx_skb is updated to point at the copy. This
1981 *      function returns a negative value if the received packet is too
1982 *      big or if memory is exhausted.
1983 */
1984static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1985                            struct velocity_info *vptr)
1986{
1987        int ret = -1;
1988        if (pkt_size < rx_copybreak) {
1989                struct sk_buff *new_skb;
1990
1991                new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
1992                if (new_skb) {
1993                        new_skb->ip_summed = rx_skb[0]->ip_summed;
1994                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1995                        *rx_skb = new_skb;
1996                        ret = 0;
1997                }
1998
1999        }
2000        return ret;
2001}
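
/*
 * Usage note: a return of 0 means *rx_skb now points at the freshly
 * allocated copy and the caller may leave the original ring skb mapped
 * for reuse; -1 means the packet exceeded rx_copybreak (or allocation
 * failed) and the ring skb itself must be unmapped and handed up the
 * stack, as velocity_receive_frame() does below.
 */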
2002
2003/**
2004 *      velocity_iph_realign    -       IP header alignment
2005 *      @vptr: velocity we are handling
2006 *      @skb: network layer packet buffer
2007 *      @pkt_size: received data size
2008 *
2009 *      Align IP header on a 2 bytes boundary. This behavior can be
2010 *      configured by the user.
2011 */
2012static inline void velocity_iph_realign(struct velocity_info *vptr,
2013                                        struct sk_buff *skb, int pkt_size)
2014{
2015        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2016                memmove(skb->data + 2, skb->data, pkt_size);
2017                skb_reserve(skb, 2);
2018        }
2019}
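
/*
 * With a 14-byte Ethernet header, shifting the frame up 2 bytes puts
 * the IP header on a 4-byte boundary (14 + 2 = 16). The usual trick is
 * to reserve NET_IP_ALIGN bytes at allocation time, but this hardware
 * needs the receive buffer itself to stay 64-byte aligned, so the
 * realignment has to happen after DMA, via memmove().
 */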
2020
2021/**
2022 *      velocity_receive_frame  -       received packet processor
2023 *      @vptr: velocity we are handling
2024 *      @idx: ring index
2025 *
2026 *      A packet has arrived. We process the packet and if appropriate
2027 *      pass the frame up the network stack
2028 */
2029static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2030{
2031        struct net_device_stats *stats = &vptr->netdev->stats;
2032        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2033        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2034        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2035        struct sk_buff *skb;
2036
2037        if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2038                if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2039                        VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2040                stats->rx_length_errors++;
2041                return -EINVAL;
2042        }
2043
2044        if (rd->rdesc0.RSR & RSR_MAR)
2045                stats->multicast++;
2046
2047        skb = rd_info->skb;
2048
2049        dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2050                                    vptr->rx.buf_sz, DMA_FROM_DEVICE);
2051
2052        velocity_rx_csum(rd, skb);
2053
2054        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2055                velocity_iph_realign(vptr, skb, pkt_len);
2056                rd_info->skb = NULL;
2057                dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2058                                 DMA_FROM_DEVICE);
2059        } else {
2060                dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2061                                           vptr->rx.buf_sz, DMA_FROM_DEVICE);
2062        }
2063
2064        skb_put(skb, pkt_len - 4);
2065        skb->protocol = eth_type_trans(skb, vptr->netdev);
2066
2067        if (rd->rdesc0.RSR & RSR_DETAG) {
2068                u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2069
2070                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2071        }
2072        netif_receive_skb(skb);
2073
2074        stats->rx_bytes += pkt_len;
2075        stats->rx_packets++;
2076
2077        return 0;
2078}
2079
2080/**
2081 *      velocity_rx_srv         -       service RX interrupt
2082 *      @vptr: velocity
2083 *
2084 *      Walk the receive ring of the velocity adapter and remove
2085 *      any received packets from the receive queue. Hand the ring
2086 *      slots back to the adapter for reuse.
2087 */
2088static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2089{
2090        struct net_device_stats *stats = &vptr->netdev->stats;
2091        int rd_curr = vptr->rx.curr;
2092        int works = 0;
2093
2094        while (works < budget_left) {
2095                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2096
2097                if (!vptr->rx.info[rd_curr].skb)
2098                        break;
2099
2100                if (rd->rdesc0.len & OWNED_BY_NIC)
2101                        break;
2102
2103                rmb();
2104
2105                /*
2106                 *      Don't drop CE or RL error frame although RXOK is off
2107                 */
2108                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2109                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2110                                stats->rx_dropped++;
2111                } else {
2112                        if (rd->rdesc0.RSR & RSR_CRC)
2113                                stats->rx_crc_errors++;
2114                        if (rd->rdesc0.RSR & RSR_FAE)
2115                                stats->rx_frame_errors++;
2116
2117                        stats->rx_dropped++;
2118                }
2119
2120                rd->size |= RX_INTEN;
2121
2122                rd_curr++;
2123                if (rd_curr >= vptr->options.numrx)
2124                        rd_curr = 0;
2125                works++;
2126        }
2127
2128        vptr->rx.curr = rd_curr;
2129
2130        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2131                velocity_give_many_rx_descs(vptr);
2132
2133        VAR_USED(stats);
2134        return works;
2135}
2136
2137static int velocity_poll(struct napi_struct *napi, int budget)
2138{
2139        struct velocity_info *vptr = container_of(napi,
2140                        struct velocity_info, napi);
2141        unsigned int rx_done;
2142        unsigned long flags;
2143
2144        /*
2145         * Service the RX ring first, then reap TX completions under
2146         * the lock (ordering taken from the VIA out-of-tree driver).
2147         */
2148        rx_done = velocity_rx_srv(vptr, budget);
2149        spin_lock_irqsave(&vptr->lock, flags);
2150        velocity_tx_srv(vptr);
2151        /* If budget not fully consumed, exit the polling mode */
2152        if (rx_done < budget) {
2153                napi_complete_done(napi, rx_done);
2154                mac_enable_int(vptr->mac_regs);
2155        }
2156        spin_unlock_irqrestore(&vptr->lock, flags);
2157
2158        return rx_done;
2159}
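
/*
 * velocity_poll() follows the standard NAPI contract; a minimal
 * skeleton of that contract (illustrative only; service_rx() and
 * reenable_device_irqs() are hypothetical stand-ins):
 *
 *	static int poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = service_rx(budget);
 *
 *		if (done < budget) {
 *			napi_complete_done(napi, done);
 *			reenable_device_irqs();
 *		}
 *		return done;
 *	}
 *
 * Interrupts are re-enabled only when the budget was not exhausted;
 * otherwise the core keeps polling and the device stays silent.
 */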
2160
2161/**
2162 *      velocity_intr           -       interrupt callback
2163 *      @irq: interrupt number
2164 *      @dev_instance: interrupting device
2165 *
2166 *      Called whenever an interrupt is raised on the velocity
2167 *      adapter's (possibly shared) IRQ line. We may not be the source
2168 *      of the interrupt, so we first check whether we are and, if not,
2169 *      exit as efficiently as possible.
2170 */
2171static irqreturn_t velocity_intr(int irq, void *dev_instance)
2172{
2173        struct net_device *dev = dev_instance;
2174        struct velocity_info *vptr = netdev_priv(dev);
2175        u32 isr_status;
2176
2177        spin_lock(&vptr->lock);
2178        isr_status = mac_read_isr(vptr->mac_regs);
2179
2180        /* Not us ? */
2181        if (isr_status == 0) {
2182                spin_unlock(&vptr->lock);
2183                return IRQ_NONE;
2184        }
2185
2186        /* Ack the interrupt */
2187        mac_write_isr(vptr->mac_regs, isr_status);
2188
2189        if (likely(napi_schedule_prep(&vptr->napi))) {
2190                mac_disable_int(vptr->mac_regs);
2191                __napi_schedule(&vptr->napi);
2192        }
2193
2194        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2195                velocity_error(vptr, isr_status);
2196
2197        spin_unlock(&vptr->lock);
2198
2199        return IRQ_HANDLED;
2200}
2201
2202/**
2203 *      velocity_open           -       interface activation callback
2204 *      @dev: network layer device to open
2205 *
2206 *      Called when the network layer brings the interface up. Returns
2207 *      a negative posix error code on failure, or zero on success.
2208 *
2209 *      All the ring allocation and set up is done on open for this
2210 *      adapter to minimise memory usage when inactive
2211 */
2212static int velocity_open(struct net_device *dev)
2213{
2214        struct velocity_info *vptr = netdev_priv(dev);
2215        int ret;
2216
2217        ret = velocity_init_rings(vptr, dev->mtu);
2218        if (ret < 0)
2219                goto out;
2220
2221        /* Ensure chip is running */
2222        velocity_set_power_state(vptr, PCI_D0);
2223
2224        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2225
2226        ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2227                          dev->name, dev);
2228        if (ret < 0) {
2229                /* Power down the chip */
2230                velocity_set_power_state(vptr, PCI_D3hot);
2231                velocity_free_rings(vptr);
2232                goto out;
2233        }
2234
2235        velocity_give_many_rx_descs(vptr);
2236
2237        mac_enable_int(vptr->mac_regs);
2238        netif_start_queue(dev);
2239        napi_enable(&vptr->napi);
2240        vptr->flags |= VELOCITY_FLAGS_OPENED;
2241out:
2242        return ret;
2243}
2244
2245/**
2246 *      velocity_shutdown       -       shut down the chip
2247 *      @vptr: velocity to deactivate
2248 *
2249 *      Shuts down the internal operations of the velocity and
2250 *      disables interrupts, autopolling, transmit and receive
2251 */
2252static void velocity_shutdown(struct velocity_info *vptr)
2253{
2254        struct mac_regs __iomem *regs = vptr->mac_regs;
2255        mac_disable_int(regs);
2256        writel(CR0_STOP, &regs->CR0Set);
2257        writew(0xFFFF, &regs->TDCSRClr);
2258        writeb(0xFF, &regs->RDCSRClr);
2259        safe_disable_mii_autopoll(regs);
2260        mac_clear_isr(regs);
2261}
2262
2263/**
2264 *      velocity_change_mtu     -       MTU change callback
2265 *      @dev: network device
2266 *      @new_mtu: desired MTU
2267 *
2268 *      Handle requests from the networking layer for MTU change on
2269 *      this interface. It gets called on a change by the network layer.
2270 *      Return zero for success or negative posix error code.
2271 */
2272static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2273{
2274        struct velocity_info *vptr = netdev_priv(dev);
2275        int ret = 0;
2276
2277        if (!netif_running(dev)) {
2278                dev->mtu = new_mtu;
2279                goto out_0;
2280        }
2281
2282        if (dev->mtu != new_mtu) {
2283                struct velocity_info *tmp_vptr;
2284                unsigned long flags;
2285                struct rx_info rx;
2286                struct tx_info tx;
2287
2288                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2289                if (!tmp_vptr) {
2290                        ret = -ENOMEM;
2291                        goto out_0;
2292                }
2293
2294                tmp_vptr->netdev = dev;
2295                tmp_vptr->pdev = vptr->pdev;
2296                tmp_vptr->dev = vptr->dev;
2297                tmp_vptr->options = vptr->options;
2298                tmp_vptr->tx.numq = vptr->tx.numq;
2299
2300                ret = velocity_init_rings(tmp_vptr, new_mtu);
2301                if (ret < 0)
2302                        goto out_free_tmp_vptr_1;
2303
2304                napi_disable(&vptr->napi);
2305
2306                spin_lock_irqsave(&vptr->lock, flags);
2307
2308                netif_stop_queue(dev);
2309                velocity_shutdown(vptr);
2310
2311                rx = vptr->rx;
2312                tx = vptr->tx;
2313
2314                vptr->rx = tmp_vptr->rx;
2315                vptr->tx = tmp_vptr->tx;
2316
2317                tmp_vptr->rx = rx;
2318                tmp_vptr->tx = tx;
2319
2320                dev->mtu = new_mtu;
2321
2322                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2323
2324                velocity_give_many_rx_descs(vptr);
2325
2326                napi_enable(&vptr->napi);
2327
2328                mac_enable_int(vptr->mac_regs);
2329                netif_start_queue(dev);
2330
2331                spin_unlock_irqrestore(&vptr->lock, flags);
2332
2333                velocity_free_rings(tmp_vptr);
2334
2335out_free_tmp_vptr_1:
2336                kfree(tmp_vptr);
2337        }
2338out_0:
2339        return ret;
2340}
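
/*
 * The swap above is deliberately routed through a throwaway
 * velocity_info: the resized rings are fully allocated in tmp_vptr
 * first, so an allocation failure leaves the running device untouched;
 * only then are the rx/tx bookkeeping structures exchanged under the
 * lock, and the old rings are freed through tmp_vptr afterwards.
 */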
2341
2342#ifdef CONFIG_NET_POLL_CONTROLLER
2343/**
2344 *  velocity_poll_controller            -       Velocity Poll controller function
2345 *  @dev: network device
2346 *
2347 *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
2348 *  with interrupts disabled, by invoking the interrupt handler directly
2349 *  inside a disable_irq()/enable_irq() pair.
2350 */
2351static void velocity_poll_controller(struct net_device *dev)
2352{
2353        disable_irq(dev->irq);
2354        velocity_intr(dev->irq, dev);
2355        enable_irq(dev->irq);
2356}
2357#endif
2358
2359/**
2360 *      velocity_mii_ioctl              -       MII ioctl handler
2361 *      @dev: network device
2362 *      @ifr: the ifreq block for the ioctl
2363 *      @cmd: the command
2364 *
2365 *      Process MII requests made via ioctl from the network layer. These
2366 *      are used by tools like kudzu to interrogate the link state of the
2367 *      hardware
2368 */
2369static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2370{
2371        struct velocity_info *vptr = netdev_priv(dev);
2372        struct mac_regs __iomem *regs = vptr->mac_regs;
2373        unsigned long flags;
2374        struct mii_ioctl_data *miidata = if_mii(ifr);
2375        int err;
2376
2377        switch (cmd) {
2378        case SIOCGMIIPHY:
2379                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2380                break;
2381        case SIOCGMIIREG:
2382                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2383                        return -ETIMEDOUT;
2384                break;
2385        case SIOCSMIIREG:
2386                spin_lock_irqsave(&vptr->lock, flags);
2387                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2388                spin_unlock_irqrestore(&vptr->lock, flags);
2389                check_connection_type(vptr->mac_regs);
2390                if (err)
2391                        return err;
2392                break;
2393        default:
2394                return -EOPNOTSUPP;
2395        }
2396        return 0;
2397}
2398
2399/**
2400 *      velocity_ioctl          -       ioctl entry point
2401 *      @dev: network device
2402 *      @rq: interface request ioctl
2403 *      @cmd: command code
2404 *
2405 *      Called when the user issues an ioctl request to the network
2406 *      device in question. The velocity interface supports MII.
2407 */
2408static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2409{
2410        struct velocity_info *vptr = netdev_priv(dev);
2411        int ret;
2412
2413        /* If we are asked for information and the device is in a power-
2414           saving state then we must bring it back up to talk to it */
2415
2416        if (!netif_running(dev))
2417                velocity_set_power_state(vptr, PCI_D0);
2418
2419        switch (cmd) {
2420        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2421        case SIOCGMIIREG:       /* Read MII PHY register. */
2422        case SIOCSMIIREG:       /* Write to MII PHY register. */
2423                ret = velocity_mii_ioctl(dev, rq, cmd);
2424                break;
2425
2426        default:
2427                ret = -EOPNOTSUPP;
2428        }
2429        if (!netif_running(dev))
2430                velocity_set_power_state(vptr, PCI_D3hot);
2431
2432
2433        return ret;
2434}
2435
2436/**
2437 *      velocity_get_status     -       statistics callback
2438 *      @dev: network device
2439 *
2440 *      Callback from the network layer to allow driver statistics
2441 *      to be resynchronized with hardware collected state. In the
2442 *      case of the velocity we need to pull the MIB counters from
2443 *      the hardware into the counters before letting the network
2444 *      layer display them.
2445 */
2446static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2447{
2448        struct velocity_info *vptr = netdev_priv(dev);
2449
2450        /* If the hardware is down, don't touch MII */
2451        if (!netif_running(dev))
2452                return &dev->stats;
2453
2454        spin_lock_irq(&vptr->lock);
2455        velocity_update_hw_mibs(vptr);
2456        spin_unlock_irq(&vptr->lock);
2457
2458        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2459        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2460        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2461
2462//  unsigned long   rx_dropped;     /* no space in linux buffers    */
2463        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2464        /* detailed rx_errors: */
2465//  unsigned long   rx_length_errors;
2466//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2467        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2468//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2469//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2470//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2471
2472        /* detailed tx_errors */
2473//  unsigned long   tx_fifo_errors;
2474
2475        return &dev->stats;
2476}
2477
2478/**
2479 *      velocity_close          -       close adapter callback
2480 *      @dev: network device
2481 *
2482 *      Callback from the network layer when the velocity is being
2483 *      deactivated by the network layer
2484 */
2485static int velocity_close(struct net_device *dev)
2486{
2487        struct velocity_info *vptr = netdev_priv(dev);
2488
2489        napi_disable(&vptr->napi);
2490        netif_stop_queue(dev);
2491        velocity_shutdown(vptr);
2492
2493        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2494                velocity_get_ip(vptr);
2495
2496        free_irq(dev->irq, dev);
2497
2498        velocity_free_rings(vptr);
2499
2500        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2501        return 0;
2502}
2503
2504/**
2505 *      velocity_xmit           -       transmit packet callback
2506 *      @skb: buffer to transmit
2507 *      @dev: network device
2508 *
2509 *      Called by the network layer to request that a packet be queued to
2510 *      the velocity. Returns zero on success.
2511 */
2512static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2513                                 struct net_device *dev)
2514{
2515        struct velocity_info *vptr = netdev_priv(dev);
2516        int qnum = 0;
2517        struct tx_desc *td_ptr;
2518        struct velocity_td_info *tdinfo;
2519        unsigned long flags;
2520        int pktlen;
2521        int index, prev;
2522        int i = 0;
2523
2524        if (skb_padto(skb, ETH_ZLEN))
2525                goto out;
2526
2527        /* The hardware can handle at most 7 memory segments, so merge
2528         * the skb if there are more */
2529        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2530                dev_kfree_skb_any(skb);
2531                return NETDEV_TX_OK;
2532        }
2533
2534        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2535                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2536                                skb_headlen(skb);
2537
2538        spin_lock_irqsave(&vptr->lock, flags);
2539
2540        index = vptr->tx.curr[qnum];
2541        td_ptr = &(vptr->tx.rings[qnum][index]);
2542        tdinfo = &(vptr->tx.infos[qnum][index]);
2543
2544        td_ptr->tdesc1.TCR = TCR0_TIC;
2545        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2546
2547        /*
2548         *      Map the linear network buffer into PCI space and
2549         *      add it to the transmit ring.
2550         */
2551        tdinfo->skb = skb;
2552        tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2553                                                                DMA_TO_DEVICE);
2554        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2555        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2556        td_ptr->td_buf[0].pa_high = 0;
2557        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2558
2559        /* Handle fragments */
2560        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562
2563                tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2564                                                          frag, 0,
2565                                                          skb_frag_size(frag),
2566                                                          DMA_TO_DEVICE);
2567
2568                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2569                td_ptr->td_buf[i + 1].pa_high = 0;
2570                td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2571        }
2572        tdinfo->nskb_dma = i + 1;
2573
2574        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2575
2576        if (skb_vlan_tag_present(skb)) {
2577                td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2578                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2579        }
2580
2581        /*
2582         *      Handle hardware checksum
2583         */
2584        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2585                const struct iphdr *ip = ip_hdr(skb);
2586                if (ip->protocol == IPPROTO_TCP)
2587                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2588                else if (ip->protocol == IPPROTO_UDP)
2589                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2590                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2591        }
2592
2593        prev = index - 1;
2594        if (prev < 0)
2595                prev = vptr->options.numtx - 1;
2596        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2597        vptr->tx.used[qnum]++;
2598        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2599
2600        if (AVAIL_TD(vptr, qnum) < 1)
2601                netif_stop_queue(dev);
2602
2603        td_ptr = &(vptr->tx.rings[qnum][prev]);
2604        td_ptr->td_buf[0].size |= TD_QUEUE;
2605        mac_tx_queue_wake(vptr->mac_regs, qnum);
2606
2607        spin_unlock_irqrestore(&vptr->lock, flags);
2608out:
2609        return NETDEV_TX_OK;
2610}
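
/*
 * Handover sketch for the tail of velocity_xmit(): descriptor N is
 * filled with TD_QUEUE clear (ending the chain there) and marked
 * OWNED_BY_NIC, then TD_QUEUE is set on descriptor N - 1 so a NIC
 * already walking the ring chains onto N, and mac_tx_queue_wake()
 * restarts the queue in case it went idle before the link was made.
 */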
2611
2612static const struct net_device_ops velocity_netdev_ops = {
2613        .ndo_open               = velocity_open,
2614        .ndo_stop               = velocity_close,
2615        .ndo_start_xmit         = velocity_xmit,
2616        .ndo_get_stats          = velocity_get_stats,
2617        .ndo_validate_addr      = eth_validate_addr,
2618        .ndo_set_mac_address    = eth_mac_addr,
2619        .ndo_set_rx_mode        = velocity_set_multi,
2620        .ndo_change_mtu         = velocity_change_mtu,
2621        .ndo_do_ioctl           = velocity_ioctl,
2622        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2623        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2624#ifdef CONFIG_NET_POLL_CONTROLLER
2625        .ndo_poll_controller    = velocity_poll_controller,
2626#endif
2627};
2628
2629/**
2630 *      velocity_init_info      -       init private data
2631 *      @vptr: Velocity info
2632 *      @info: Board type
2633 *
2634 *      Set up the initial velocity_info struct for the device that has
2635 *      been discovered, including the spinlock used to serialise access
2636 *      to the adapter.
2637 */
2638static void velocity_init_info(struct velocity_info *vptr,
2639                                const struct velocity_info_tbl *info)
2640{
2641        vptr->chip_id = info->chip_id;
2642        vptr->tx.numq = info->txqueue;
2643        vptr->multicast_limit = MCAM_SIZE;
2644        spin_lock_init(&vptr->lock);
2645}
2646
2647/**
2648 *      velocity_get_pci_info   -       retrieve PCI info for device
2649 *      @vptr: velocity device
2650 *
2651 *      Retrieve the PCI configuration space data that interests us,
2652 *      via the pci_dev cached in @vptr, from the kernel PCI layer and
2653 *      sanity check the BAR layout.
2654 */
2655static int velocity_get_pci_info(struct velocity_info *vptr)
2656{
2657        struct pci_dev *pdev = vptr->pdev;
2658
2659        pci_set_master(pdev);
2660
2661        vptr->ioaddr = pci_resource_start(pdev, 0);
2662        vptr->memaddr = pci_resource_start(pdev, 1);
2663
2664        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2665                dev_err(&pdev->dev,
2666                           "region #0 is not an I/O resource, aborting.\n");
2667                return -EINVAL;
2668        }
2669
2670        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2671                dev_err(&pdev->dev,
2672                           "region #1 is an I/O resource, aborting.\n");
2673                return -EINVAL;
2674        }
2675
2676        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2677                dev_err(&pdev->dev, "region #1 is too small.\n");
2678                return -EINVAL;
2679        }
2680
2681        return 0;
2682}
2683
2684/**
2685 *      velocity_get_platform_info - retrieve platform info for device
2686 *      @vptr: velocity device
2687 *
2688 *      Retrieve the platform (device tree) configuration data that
2689 *      interests us from the OF node attached to vptr->dev.
2690 */
2691static int velocity_get_platform_info(struct velocity_info *vptr)
2692{
2693        struct resource res;
2694        int ret;
2695
2696        if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2697                vptr->no_eeprom = 1;
2698
2699        ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2700        if (ret) {
2701                dev_err(vptr->dev, "unable to find memory address\n");
2702                return ret;
2703        }
2704
2705        vptr->memaddr = res.start;
2706
2707        if (resource_size(&res) < VELOCITY_IO_SIZE) {
2708                dev_err(vptr->dev, "memory region is too small.\n");
2709                return -EINVAL;
2710        }
2711
2712        return 0;
2713}
2714
2715/**
2716 *      velocity_print_info     -       per driver data
2717 *      @vptr: velocity
2718 *
2719 *      Print per driver data as the kernel driver finds Velocity
2720 *      hardware
2721 */
2722static void velocity_print_info(struct velocity_info *vptr)
2723{
2724        struct net_device *dev = vptr->netdev;
2725
2726        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2727        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2728                dev->name, dev->dev_addr);
2729}
2730
2731static u32 velocity_get_link(struct net_device *dev)
2732{
2733        struct velocity_info *vptr = netdev_priv(dev);
2734        struct mac_regs __iomem *regs = vptr->mac_regs;
2735        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2736}
2737
2738/**
2739 *      velocity_probe - set up discovered velocity device
2740 *      @dev: generic device that was discovered
2741 *      @irq: interrupt line assigned to the device
2742 *      @info: chip information table entry that matched
2743 *      @bustype: bus (PCI or platform) the device is connected to
2744 *
2745 *      Configure a discovered adapter from scratch; return a negative errno on failure.
2746 */
2747static int velocity_probe(struct device *dev, int irq,
2748                           const struct velocity_info_tbl *info,
2749                           enum velocity_bus_type bustype)
2750{
2751        static int first = 1;
2752        struct net_device *netdev;
2753        int i;
2754        const char *drv_string;
2755        struct velocity_info *vptr;
2756        struct mac_regs __iomem *regs;
2757        int ret = -ENOMEM;
2758
2759        /* FIXME: this driver, like almost all other ethernet drivers,
2760         * can support more than MAX_UNITS.
2761         */
2762        if (velocity_nics >= MAX_UNITS) {
2763                dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2764                return -ENODEV;
2765        }
2766
2767        netdev = alloc_etherdev(sizeof(struct velocity_info));
2768        if (!netdev)
2769                goto out;
2770
2771        /* Chain it all together */
2772
2773        SET_NETDEV_DEV(netdev, dev);
2774        vptr = netdev_priv(netdev);
2775
2776        if (first) {
2777                printk(KERN_INFO "%s Ver. %s\n",
2778                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2779                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2780                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2781                first = 0;
2782        }
2783
2784        netdev->irq = irq;
2785        vptr->netdev = netdev;
2786        vptr->dev = dev;
2787
2788        velocity_init_info(vptr, info);
2789
2790        if (bustype == BUS_PCI) {
2791                vptr->pdev = to_pci_dev(dev);
2792
2793                ret = velocity_get_pci_info(vptr);
2794                if (ret < 0)
2795                        goto err_free_dev;
2796        } else {
2797                vptr->pdev = NULL;
2798                ret = velocity_get_platform_info(vptr);
2799                if (ret < 0)
2800                        goto err_free_dev;
2801        }
2802
2803        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2804        if (regs == NULL) {
2805                ret = -EIO;
2806                goto err_free_dev;
2807        }
2808
2809        vptr->mac_regs = regs;
2810        vptr->rev_id = readb(&regs->rev_id);
2811
2812        mac_wol_reset(regs);
2813
2814        for (i = 0; i < 6; i++)
2815                netdev->dev_addr[i] = readb(&regs->PAR[i]);
2816
2817
2818        drv_string = dev_driver_string(dev);
2819
2820        velocity_get_options(&vptr->options, velocity_nics, drv_string);
2821
2822        /*
2823         *      Mask out the options that cannot be set on this chip
2824         */
2825
2826        vptr->options.flags &= info->flags;
2827
2828        /*
2829         *      Enable the chip-specified capabilities
2830         */
2831
2832        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2833
2834        vptr->wol_opts = vptr->options.wol_opts;
2835        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2836
2837        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2838
2839        netdev->netdev_ops = &velocity_netdev_ops;
2840        netdev->ethtool_ops = &velocity_ethtool_ops;
2841        netif_napi_add(netdev, &vptr->napi, velocity_poll,
2842                                                        VELOCITY_NAPI_WEIGHT);
2843
2844        netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2845                           NETIF_F_HW_VLAN_CTAG_TX;
2846        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2847                        NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2848                        NETIF_F_IP_CSUM;
2849
2850        /* MTU range: 64 - 9000 */
2851        netdev->min_mtu = VELOCITY_MIN_MTU;
2852        netdev->max_mtu = VELOCITY_MAX_MTU;
2853
2854        ret = register_netdev(netdev);
2855        if (ret < 0)
2856                goto err_iounmap;
2857
2858        if (!velocity_get_link(netdev)) {
2859                netif_carrier_off(netdev);
2860                vptr->mii_status |= VELOCITY_LINK_FAIL;
2861        }
2862
2863        velocity_print_info(vptr);
2864        dev_set_drvdata(vptr->dev, netdev);
2865
2866        /* and leave the chip powered down */
2867
2868        velocity_set_power_state(vptr, PCI_D3hot);
2869        velocity_nics++;
2870out:
2871        return ret;
2872
2873err_iounmap:
2874        netif_napi_del(&vptr->napi);
2875        iounmap(regs);
2876err_free_dev:
2877        free_netdev(netdev);
2878        goto out;
2879}
2880
2881/**
2882 *      velocity_remove - device unplug
2883 *      @dev: device being removed
2884 *
2885 *      Device unload callback. Called on an unplug or on module
2886 *      unload for each active device that is present. Disconnects
2887 *      the device from the network layer and frees all the resources
2888 */
2889static int velocity_remove(struct device *dev)
2890{
2891        struct net_device *netdev = dev_get_drvdata(dev);
2892        struct velocity_info *vptr = netdev_priv(netdev);
2893
2894        unregister_netdev(netdev);
2895        netif_napi_del(&vptr->napi);
2896        iounmap(vptr->mac_regs);
2897        free_netdev(netdev);
2898        velocity_nics--;
2899
2900        return 0;
2901}
2902
2903static int velocity_pci_probe(struct pci_dev *pdev,
2904                               const struct pci_device_id *ent)
2905{
2906        const struct velocity_info_tbl *info =
2907                                        &chip_info_table[ent->driver_data];
2908        int ret;
2909
2910        ret = pci_enable_device(pdev);
2911        if (ret < 0)
2912                return ret;
2913
2914        ret = pci_request_regions(pdev, VELOCITY_NAME);
2915        if (ret < 0) {
2916                dev_err(&pdev->dev, "No PCI resources.\n");
2917                goto fail1;
2918        }
2919
2920        ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2921        if (ret == 0)
2922                return 0;
2923
2924        pci_release_regions(pdev);
2925fail1:
2926        pci_disable_device(pdev);
2927        return ret;
2928}
2929
2930static void velocity_pci_remove(struct pci_dev *pdev)
2931{
2932        velocity_remove(&pdev->dev);
2933
2934        pci_release_regions(pdev);
2935        pci_disable_device(pdev);
2936}
2937
2938static int velocity_platform_probe(struct platform_device *pdev)
2939{
2940        const struct of_device_id *of_id;
2941        const struct velocity_info_tbl *info;
2942        int irq;
2943
2944        of_id = of_match_device(velocity_of_ids, &pdev->dev);
2945        if (!of_id)
2946                return -EINVAL;
2947        info = of_id->data;
2948
2949        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2950        if (!irq)
2951                return -EINVAL;
2952
2953        return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2954}
2955
2956static int velocity_platform_remove(struct platform_device *pdev)
2957{
2958        velocity_remove(&pdev->dev);
2959
2960        return 0;
2961}
2962
2963#ifdef CONFIG_PM_SLEEP
2964/**
2965 *      wol_calc_crc            -       WOL CRC
2966 *      @size: pattern length in 8-byte units (one mask byte per unit)
2967 *      @pattern: data pattern
2968 *      @mask_pattern: bitmask selecting which pattern bytes to hash
2969 *
2970 *      Compute the wake on lan CRC hash for the packet header of interest.
2971 */
2972static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2973{
2974        u16 crc = 0xFFFF;
2975        u8 mask;
2976        int i, j;
2977
2978        for (i = 0; i < size; i++) {
2979                mask = mask_pattern[i];
2980
2981                /* Skip this byte entirely if its mask is zero */
2982                if (mask == 0x00)
2983                        continue;
2984
2985                for (j = 0; j < 8; j++) {
2986                        if ((mask & 0x01) == 0) {
2987                                mask >>= 1;
2988                                continue;
2989                        }
2990                        mask >>= 1;
2991                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2992                }
2993        }
2994        /*      Finally, invert the result once to get the correct data */
2995        crc = ~crc;
2996        return bitrev32(crc) >> 16;
2997}
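
/*
 * Equivalence note: for a 16-bit value, bitrev32(crc) >> 16 is the same
 * as bitrev16(crc), i.e. the CRC is returned bit-reversed, presumably
 * to match the bit order the pattern-match hardware uses when comparing
 * against the PatternCRC registers written in velocity_set_wol().
 */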
2998
2999/**
3000 *      velocity_set_wol        -       set up for wake on lan
3001 *      @vptr: velocity to set WOL status on
3002 *
3003 *      Set a card up for wake on lan either by unicast or by
3004 *      ARP packet.
3005 *
3006 *      FIXME: check static buffer is safe here
3007 */
3008static int velocity_set_wol(struct velocity_info *vptr)
3009{
3010        struct mac_regs __iomem *regs = vptr->mac_regs;
3011        enum speed_opt spd_dpx = vptr->options.spd_dpx;
3012        static u8 buf[256];
3013        int i;
3014
3015        static u32 mask_pattern[2][4] = {
3016                {0x00203000, 0x000003C0, 0x00000000, 0x00000000}, /* ARP */
3017                {0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}  /* Magic Packet */
3018        };
3019
3020        writew(0xFFFF, &regs->WOLCRClr);
3021        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3022        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3023
3024        /*
3025           if (vptr->wol_opts & VELOCITY_WOL_PHY)
3026           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3027         */
3028
3029        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3030                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3031
3032        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3033                struct arp_packet *arp = (struct arp_packet *) buf;
3034                u16 crc;
3035                memset(buf, 0, sizeof(struct arp_packet) + 7);
3036
3037                for (i = 0; i < 4; i++)
3038                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3039
3040                arp->type = htons(ETH_P_ARP);
3041                arp->ar_op = htons(1);
3042
3043                memcpy(arp->ar_tip, vptr->ip_addr, 4);
3044
3045                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3046                                (u8 *)&mask_pattern[0][0]);
3047
3048                writew(crc, &regs->PatternCRC[0]);
3049                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3050        }
3051
3052        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3053        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3054
3055        writew(0x0FFF, &regs->WOLSRClr);
3056
3057        if (spd_dpx == SPD_DPX_1000_FULL)
3058                goto mac_done;
3059
3060        if (spd_dpx != SPD_DPX_AUTO)
3061                goto advertise_done;
3062
3063        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3064                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3065                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3066
3067                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3068        }
3069
3070        if (vptr->mii_status & VELOCITY_SPEED_1000)
3071                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3072
3073advertise_done:
3074        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3075
3076        {
3077                u8 gcr = readb(&regs->CHIPGCR);
3078                /* force MII (not GMII) mode and full duplex while asleep */
3079                gcr = (gcr & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3080                writeb(gcr, &regs->CHIPGCR);
3081        }
3082
3083mac_done:
3084        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3085        /* Turn on SWPTAG just before entering power mode */
3086        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3087        /* Go to bed ..... */
3088        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3089
3090        return 0;
3091}
3092
3093/**
3094 *      velocity_save_context   -       save registers
3095 *      @vptr: velocity
3096 *      @context: buffer for stored context
3097 *
3098 *      Retrieve the current configuration from the velocity hardware
3099 *      and stash it in the context structure, for use by the context
3100 *      restore functions. This allows us to save things we need across
3101 *      power down states
3102 */
3103static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3104{
3105        struct mac_regs __iomem *regs = vptr->mac_regs;
3106        u16 i;
3107        u8 __iomem *ptr = (u8 __iomem *)regs;
3108
3109        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3110                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3111
3112        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3113                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3114
3115        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3116                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3117
3118}
3119
3120static int velocity_suspend(struct device *dev)
3121{
3122        struct net_device *netdev = dev_get_drvdata(dev);
3123        struct velocity_info *vptr = netdev_priv(netdev);
3124        unsigned long flags;
3125
3126        if (!netif_running(vptr->netdev))
3127                return 0;
3128
3129        netif_device_detach(vptr->netdev);
3130
3131        spin_lock_irqsave(&vptr->lock, flags);
3132        if (vptr->pdev)
3133                pci_save_state(vptr->pdev);
3134
3135        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3136                velocity_get_ip(vptr);
3137                velocity_save_context(vptr, &vptr->context);
3138                velocity_shutdown(vptr);
3139                velocity_set_wol(vptr);
3140                if (vptr->pdev)
3141                        pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3142                velocity_set_power_state(vptr, PCI_D3hot);
3143        } else {
3144                velocity_save_context(vptr, &vptr->context);
3145                velocity_shutdown(vptr);
3146                if (vptr->pdev)
3147                        pci_disable_device(vptr->pdev);
3148                velocity_set_power_state(vptr, PCI_D3hot);
3149        }
3150
3151        spin_unlock_irqrestore(&vptr->lock, flags);
3152        return 0;
3153}
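
/*
 * Both branches above leave the chip in D3hot; the WOL branch first
 * refreshes the cached IPv4 address, arms the wake patterns and, on
 * PCI, tells the core that the device may assert wake from D3hot.
 */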
3154
3155/**
3156 *      velocity_restore_context        -       restore registers
3157 *      @vptr: velocity
3158 *      @context: buffer containing the stored context
3159 *
3160 *      Reload the register configuration from the velocity context
3161 *      created by velocity_save_context.
3162 */
3163static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3164{
3165        struct mac_regs __iomem *regs = vptr->mac_regs;
3166        int i;
3167        u8 __iomem *ptr = (u8 __iomem *)regs;
3168
3169        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3170                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3171
3172        /* Restore CR1-CR3 via their set (+0)/clear (+4) pairs; skip cr0 */
3173        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3174                /* Clear */
3175                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3176                /* Set */
3177                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3178        }
3179
3180        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3181                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3182
3183        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3184                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3185
3186        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3187                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3188}
3189
3190static int velocity_resume(struct device *dev)
3191{
3192        struct net_device *netdev = dev_get_drvdata(dev);
3193        struct velocity_info *vptr = netdev_priv(netdev);
3194        unsigned long flags;
3195        int i;
3196
3197        if (!netif_running(vptr->netdev))
3198                return 0;
3199
3200        velocity_set_power_state(vptr, PCI_D0);
3201
3202        if (vptr->pdev) {
3203                pci_enable_wake(vptr->pdev, PCI_D0, 0);
3204                pci_restore_state(vptr->pdev);
3205        }
3206
3207        mac_wol_reset(vptr->mac_regs);
3208
3209        spin_lock_irqsave(&vptr->lock, flags);
3210        velocity_restore_context(vptr, &vptr->context);
3211        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3212        mac_disable_int(vptr->mac_regs);
3213
3214        velocity_tx_srv(vptr);
3215
3216        for (i = 0; i < vptr->tx.numq; i++) {
3217                if (vptr->tx.used[i])
3218                        mac_tx_queue_wake(vptr->mac_regs, i);
3219        }
3220
3221        mac_enable_int(vptr->mac_regs);
3222        spin_unlock_irqrestore(&vptr->lock, flags);
3223        netif_device_attach(vptr->netdev);
3224
3225        return 0;
3226}
3227#endif  /* CONFIG_PM_SLEEP */
3228
3229static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3230
3231/*
3232 *      Definition for our device driver. The PCI layer interface
3233 *      uses this to handle all our card discovery and hotplugging.
3234 */
3235static struct pci_driver velocity_pci_driver = {
3236        .name           = VELOCITY_NAME,
3237        .id_table       = velocity_pci_id_table,
3238        .probe          = velocity_pci_probe,
3239        .remove         = velocity_pci_remove,
3240        .driver = {
3241                .pm = &velocity_pm_ops,
3242        },
3243};
3244
3245static struct platform_driver velocity_platform_driver = {
3246        .probe          = velocity_platform_probe,
3247        .remove         = velocity_platform_remove,
3248        .driver = {
3249                .name = "via-velocity",
3250                .of_match_table = velocity_of_ids,
3251                .pm = &velocity_pm_ops,
3252        },
3253};
3254
3255/**
3256 *      velocity_ethtool_up     -       pre hook for ethtool
3257 *      @dev: network device
3258 *
3259 *      Called before an ethtool operation. We need to make sure the
3260 *      chip is out of D3 state before we poke at it.
3261 */
3262static int velocity_ethtool_up(struct net_device *dev)
3263{
3264        struct velocity_info *vptr = netdev_priv(dev);
3265        if (!netif_running(dev))
3266                velocity_set_power_state(vptr, PCI_D0);
3267        return 0;
3268}
3269
3270/**
3271 *      velocity_ethtool_down   -       post hook for ethtool
3272 *      @dev: network device
3273 *
3274 *      Called after an ethtool operation. Restore the chip back to D3
3275 *      state if it isn't running.
3276 */
3277static void velocity_ethtool_down(struct net_device *dev)
3278{
3279        struct velocity_info *vptr = netdev_priv(dev);
3280        if (!netif_running(dev))
3281                velocity_set_power_state(vptr, PCI_D3hot);
3282}
3283
3284static int velocity_get_link_ksettings(struct net_device *dev,
3285                                       struct ethtool_link_ksettings *cmd)
3286{
3287        struct velocity_info *vptr = netdev_priv(dev);
3288        struct mac_regs __iomem *regs = vptr->mac_regs;
3289        u32 status;
3290        u32 supported, advertising;
3291
3292        status = check_connection_type(vptr->mac_regs);
3293
3294        supported = SUPPORTED_TP |
3295                        SUPPORTED_Autoneg |
3296                        SUPPORTED_10baseT_Half |
3297                        SUPPORTED_10baseT_Full |
3298                        SUPPORTED_100baseT_Half |
3299                        SUPPORTED_100baseT_Full |
3300                        SUPPORTED_1000baseT_Half |
3301                        SUPPORTED_1000baseT_Full;
3302
3303        advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3304        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3305                advertising |=
3306                        ADVERTISED_10baseT_Half |
3307                        ADVERTISED_10baseT_Full |
3308                        ADVERTISED_100baseT_Half |
3309                        ADVERTISED_100baseT_Full |
3310                        ADVERTISED_1000baseT_Half |
3311                        ADVERTISED_1000baseT_Full;
3312        } else {
3313                switch (vptr->options.spd_dpx) {
3314                case SPD_DPX_1000_FULL:
3315                        advertising |= ADVERTISED_1000baseT_Full;
3316                        break;
3317                case SPD_DPX_100_HALF:
3318                        advertising |= ADVERTISED_100baseT_Half;
3319                        break;
3320                case SPD_DPX_100_FULL:
3321                        advertising |= ADVERTISED_100baseT_Full;
3322                        break;
3323                case SPD_DPX_10_HALF:
3324                        advertising |= ADVERTISED_10baseT_Half;
3325                        break;
3326                case SPD_DPX_10_FULL:
3327                        advertising |= ADVERTISED_10baseT_Full;
3328                        break;
3329                default:
3330                        break;
3331                }
3332        }
3333
3334        if (status & VELOCITY_SPEED_1000)
3335                cmd->base.speed = SPEED_1000;
3336        else if (status & VELOCITY_SPEED_100)
3337                cmd->base.speed = SPEED_100;
3338        else
3339                cmd->base.speed = SPEED_10;
3340
3341        cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3342                AUTONEG_ENABLE : AUTONEG_DISABLE;
3343        cmd->base.port = PORT_TP;
3344        cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3345
3346        if (status & VELOCITY_DUPLEX_FULL)
3347                cmd->base.duplex = DUPLEX_FULL;
3348        else
3349                cmd->base.duplex = DUPLEX_HALF;
3350
3351        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3352                                                supported);
3353        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3354                                                advertising);
3355
3356        return 0;
3357}
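
/*
 * Note: the advertised modes reported above are reconstructed from the
 * driver's spd_dpx option, and speed/duplex come from the MAC's view
 * of the link, rather than being read back from the PHY's MII
 * advertisement registers.
 */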
3358
3359static int velocity_set_link_ksettings(struct net_device *dev,
3360                                       const struct ethtool_link_ksettings *cmd)
3361{
3362        struct velocity_info *vptr = netdev_priv(dev);
3363        u32 speed = cmd->base.speed;
3364        u32 curr_status;
3365        u32 new_status = 0;
3366        int ret = 0;
3367
3368        curr_status = check_connection_type(vptr->mac_regs);
3369        curr_status &= (~VELOCITY_LINK_FAIL);
3370
3371        new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3372        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3373        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3374        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3375        new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3376                       VELOCITY_DUPLEX_FULL : 0);
3377
3378        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3379            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3380                ret = -EINVAL;
3381        } else {
3382                enum speed_opt spd_dpx;
3383
3384                if (new_status & VELOCITY_AUTONEG_ENABLE)
3385                        spd_dpx = SPD_DPX_AUTO;
3386                else if ((new_status & VELOCITY_SPEED_1000) &&
3387                         (new_status & VELOCITY_DUPLEX_FULL)) {
3388                        spd_dpx = SPD_DPX_1000_FULL;
3389                } else if (new_status & VELOCITY_SPEED_100)
3390                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3391                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3392                else if (new_status & VELOCITY_SPEED_10)
3393                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3394                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3395                else
3396                        return -EOPNOTSUPP;
3397
3398                vptr->options.spd_dpx = spd_dpx;
3399
3400                velocity_set_media_mode(vptr, new_status);
3401        }
3402
3403        return ret;
3404}
3405
3406static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3407{
3408        struct velocity_info *vptr = netdev_priv(dev);
3409
3410        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3411        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3412        if (vptr->pdev)
3413                strlcpy(info->bus_info, pci_name(vptr->pdev),
3414                                                sizeof(info->bus_info));
3415        else
3416                strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3417}
3418
3419static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3420{
3421        struct velocity_info *vptr = netdev_priv(dev);
3422        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3423        wol->wolopts |= WAKE_MAGIC;     /* magic packet wake is always enabled */
3424        /*
3425           if (vptr->wol_opts & VELOCITY_WOL_PHY)
3426                   wol.wolopts|=WAKE_PHY;
3427                         */
3428        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3429                wol->wolopts |= WAKE_UCAST;
3430        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3431                wol->wolopts |= WAKE_ARP;
3432        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3433}
3434
3435static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3436{
3437        struct velocity_info *vptr = netdev_priv(dev);
3438
3439        if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3440                return -EINVAL;
3441        vptr->wol_opts = VELOCITY_WOL_MAGIC;    /* magic packet stays enabled */
3442
3443        /*
3444           if (wol.wolopts & WAKE_PHY) {
3445           vptr->wol_opts|=VELOCITY_WOL_PHY;
3446           vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3447           }
3448         */
3449
3450        if (wol->wolopts & WAKE_MAGIC) {
3451                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3452                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3453        }
3454        if (wol->wolopts & WAKE_UCAST) {
3455                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3456                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3457        }
3458        if (wol->wolopts & WAKE_ARP) {
3459                vptr->wol_opts |= VELOCITY_WOL_ARP;
3460                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3461        }
3462        memcpy(vptr->wol_passwd, wol->sopass, 6);
3463        return 0;
3464}
3465
3466static u32 velocity_get_msglevel(struct net_device *dev)
3467{
3468        return msglevel;
3469}
3470
3471static void velocity_set_msglevel(struct net_device *dev, u32 value)
3472{
3473        msglevel = value;
3474}
3475
3476static int get_pending_timer_val(int val)
3477{
3478        int mult_bits = val >> 6;
3479        int mult = 1;
3480
3481        switch (mult_bits) {
3483        case 1:
3484                mult = 4; break;
3485        case 2:
3486                mult = 16; break;
3487        case 3:
3488                mult = 64; break;
3489        case 0:
3490        default:
3491                break;
3492        }
3493
3494        return (val & 0x3f) * mult;
3495}
3496
3497static void set_pending_timer_val(int *val, u32 us)
3498{
3499        u8 mult = 0;
3500        u8 shift = 0;
3501
3502        if (us >= 0x3f) {
3503                mult = 1; /* mult with 4 */
3504                shift = 2;
3505        }
3506        if (us >= 0x3f * 4) {
3507                mult = 2; /* mult with 16 */
3508                shift = 4;
3509        }
3510        if (us >= 0x3f * 16) {
3511                mult = 3; /* mult with 64 */
3512                shift = 6;
3513        }
3514
3515        *val = (mult << 6) | ((us >> shift) & 0x3f);
3516}
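
/*
 * A standalone userspace sketch of the timer encoding used above: the
 * hardware stores a 6-bit count plus a 2-bit multiplier (x1/x4/x16/x64),
 * so up to 0x3f * 64 microseconds can be encoded, at progressively
 * coarser resolution. These helpers mirror get/set_pending_timer_val()
 * purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static int encode_us(uint32_t us)
{
        int mult = 0, shift = 0;

        if (us >= 0x3f) {
                mult = 1;       /* x4 */
                shift = 2;
        }
        if (us >= 0x3f * 4) {
                mult = 2;       /* x16 */
                shift = 4;
        }
        if (us >= 0x3f * 16) {
                mult = 3;       /* x64 */
                shift = 6;
        }
        return (mult << 6) | ((us >> shift) & 0x3f);
}

static int decode_us(int val)
{
        static const int mult[4] = { 1, 4, 16, 64 };

        return (val & 0x3f) * mult[(val >> 6) & 3];
}

int main(void)
{
        uint32_t us;

        /* show the quantisation the 6-bit encoding introduces */
        for (us = 0; us <= 0x3f * 64; us += 500)
                printf("%4u us -> reg 0x%02x -> %4d us\n",
                       us, encode_us(us), decode_us(encode_us(us)));
        return 0;
}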
3517
3519static int velocity_get_coalesce(struct net_device *dev,
3520                struct ethtool_coalesce *ecmd)
3521{
3522        struct velocity_info *vptr = netdev_priv(dev);
3523
3524        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3525        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3526
3527        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3528        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3529
3530        return 0;
3531}
3532
3533static int velocity_set_coalesce(struct net_device *dev,
3534                struct ethtool_coalesce *ecmd)
3535{
3536        struct velocity_info *vptr = netdev_priv(dev);
3537        int max_us = 0x3f * 64;
3538        unsigned long flags;
3539
3540        /* 6 bits of count, scaled by x1/x4/x16/x64: at most 0x3f * 64 us */
3541        if (ecmd->tx_coalesce_usecs > max_us)
3542                return -EINVAL;
3543        if (ecmd->rx_coalesce_usecs > max_us)
3544                return -EINVAL;
3545
3546        if (ecmd->tx_max_coalesced_frames > 0xff)
3547                return -EINVAL;
3548        if (ecmd->rx_max_coalesced_frames > 0xff)
3549                return -EINVAL;
3550
3551        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3552        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3553
3554        set_pending_timer_val(&vptr->options.rxqueue_timer,
3555                        ecmd->rx_coalesce_usecs);
3556        set_pending_timer_val(&vptr->options.txqueue_timer,
3557                        ecmd->tx_coalesce_usecs);
3558
3559        /* Setup the interrupt suppression and queue timers */
3560        spin_lock_irqsave(&vptr->lock, flags);
3561        mac_disable_int(vptr->mac_regs);
3562        setup_adaptive_interrupts(vptr);
3563        setup_queue_timers(vptr);
3564
3565        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3566        mac_clear_isr(vptr->mac_regs);
3567        mac_enable_int(vptr->mac_regs);
3568        spin_unlock_irqrestore(&vptr->lock, flags);
3569
3570        return 0;
3571}
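
/*
 * From userspace these hooks are driven through ethtool's coalescing
 * interface, e.g. (with a hypothetical interface name):
 *
 *      ethtool -C eth0 rx-usecs 60 rx-frames 15 tx-usecs 60 tx-frames 15
 *
 * rx-usecs/tx-usecs map to the queue timers (quantised as described
 * above) and rx-frames/tx-frames to the interrupt suppression counts,
 * each limited to 0xff.
 */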
3572
3573static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3574        "rx_all",
3575        "rx_ok",
3576        "tx_ok",
3577        "rx_error",
3578        "rx_runt_ok",
3579        "rx_runt_err",
3580        "rx_64",
3581        "tx_64",
3582        "rx_65_to_127",
3583        "tx_65_to_127",
3584        "rx_128_to_255",
3585        "tx_128_to_255",
3586        "rx_256_to_511",
3587        "tx_256_to_511",
3588        "rx_512_to_1023",
3589        "tx_512_to_1023",
3590        "rx_1024_to_1518",
3591        "tx_1024_to_1518",
3592        "tx_ether_collisions",
3593        "rx_crc_errors",
3594        "rx_jumbo",
3595        "tx_jumbo",
3596        "rx_mac_control_frames",
3597        "tx_mac_control_frames",
3598        "rx_frame_alignment_errors",
3599        "rx_long_ok",
3600        "rx_long_err",
3601        "tx_sqe_errors",
3602        "rx_no_buf",
3603        "rx_symbol_errors",
3604        "in_range_length_errors",
3605        "late_collisions"
3606};
3607
3608static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3609{
3610        switch (sset) {
3611        case ETH_SS_STATS:
3612                memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3613                break;
3614        }
3615}
3616
3617static int velocity_get_sset_count(struct net_device *dev, int sset)
3618{
3619        switch (sset) {
3620        case ETH_SS_STATS:
3621                return ARRAY_SIZE(velocity_gstrings);
3622        default:
3623                return -EOPNOTSUPP;
3624        }
3625}
3626
3627static void velocity_get_ethtool_stats(struct net_device *dev,
3628                                       struct ethtool_stats *stats, u64 *data)
3629{
3630        if (netif_running(dev)) {
3631                struct velocity_info *vptr = netdev_priv(dev);
3632                u32 *p = vptr->mib_counter;
3633                int i;
3634
3635                spin_lock_irq(&vptr->lock);
3636                velocity_update_hw_mibs(vptr);
3637                spin_unlock_irq(&vptr->lock);
3638
3639                for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3640                        *data++ = *p++;
3641        }
3642}
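
/*
 * The counters above surface through "ethtool -S <dev>" in the order
 * of velocity_gstrings[]; they are only refreshed from the hardware
 * MIB counters while the interface is running.
 */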
3643
3644static const struct ethtool_ops velocity_ethtool_ops = {
3645        .get_drvinfo            = velocity_get_drvinfo,
3646        .get_wol                = velocity_ethtool_get_wol,
3647        .set_wol                = velocity_ethtool_set_wol,
3648        .get_msglevel           = velocity_get_msglevel,
3649        .set_msglevel           = velocity_set_msglevel,
3650        .get_link               = velocity_get_link,
3651        .get_strings            = velocity_get_strings,
3652        .get_sset_count         = velocity_get_sset_count,
3653        .get_ethtool_stats      = velocity_get_ethtool_stats,
3654        .get_coalesce           = velocity_get_coalesce,
3655        .set_coalesce           = velocity_set_coalesce,
3656        .begin                  = velocity_ethtool_up,
3657        .complete               = velocity_ethtool_down,
3658        .get_link_ksettings     = velocity_get_link_ksettings,
3659        .set_link_ksettings     = velocity_set_link_ksettings,
3660};
3661
3662#if defined(CONFIG_PM) && defined(CONFIG_INET)
3663static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3664{
3665        struct in_ifaddr *ifa = ptr;
3666        struct net_device *dev = ifa->ifa_dev->dev;
3667
3668        if (dev_net(dev) == &init_net &&
3669            dev->netdev_ops == &velocity_netdev_ops)
3670                velocity_get_ip(netdev_priv(dev));
3671
3672        return NOTIFY_DONE;
3673}
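
/*
 * The notifier exists so the driver can re-cache the interface's IPv4
 * address whenever it changes; velocity_set_wol() copies that cached
 * address into the ARP wake-up pattern at suspend time.
 */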
3674
3675static struct notifier_block velocity_inetaddr_notifier = {
3676        .notifier_call  = velocity_netdev_event,
3677};
3678
3679static void velocity_register_notifier(void)
3680{
3681        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3682}
3683
3684static void velocity_unregister_notifier(void)
3685{
3686        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3687}
3688
3689#else
3690
3691#define velocity_register_notifier()    do {} while (0)
3692#define velocity_unregister_notifier()  do {} while (0)
3693
3694#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3695
3696/**
3697 *      velocity_init_module    -       load time function
3698 *
3699 *      Called when the velocity module is loaded. The PCI and
3700 *      platform drivers are registered with their respective buses,
3701 *      which in turn will call the probe function for each velocity
3702 *      adapter installed in the system.
3703 */
3704static int __init velocity_init_module(void)
3705{
3706        int ret_pci, ret_platform;
3707
3708        velocity_register_notifier();
3709
3710        ret_pci = pci_register_driver(&velocity_pci_driver);
3711        ret_platform = platform_driver_register(&velocity_platform_driver);
3712
3713        /* if both registrations failed, remove the notifier */
3714        if ((ret_pci < 0) && (ret_platform < 0)) {
3715                velocity_unregister_notifier();
3716                return ret_pci;
3717        }
3718
3719        return 0;
3720}
3721
3722/**
3723 *      velocity_cleanup        -       module unload
3724 *
3725 *      When the velocity module is unloaded this function is called.
3726 *      It will clean up the notifiers and unregister the PCI and
3727 *      platform driver interfaces for this hardware. This in turn
3728 *      cleans up all discovered interfaces before returning.
3729 */
3730static void __exit velocity_cleanup_module(void)
3731{
3732        velocity_unregister_notifier();
3733
3734        pci_unregister_driver(&velocity_pci_driver);
3735        platform_driver_unregister(&velocity_platform_driver);
3736}
3737
3738module_init(velocity_init_module);
3739module_exit(velocity_cleanup_module);
3740