linux/drivers/net/ethernet/via/via-velocity.c
/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *      rx_copybreak/alignment
 *      More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/inetdevice.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"

enum velocity_bus_type {
        BUS_PCI,
        BUS_PLATFORM,
};

static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

static void velocity_set_power_state(struct velocity_info *vptr, char state)
{
        void __iomem *addr = vptr->mac_regs;

        if (vptr->pdev)
                pci_set_power_state(vptr->pdev, state);
        else
                writeb(state, addr + 0x154);
}

/**
 *      mac_get_cam_mask        -       Read a CAM mask
 *      @regs: register block for this velocity
 *      @mask: buffer to store mask
 *
 *      Fetch the mask bits of the selected CAM and store them into the
 *      provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam_mask        -       Set a CAM mask
 *      @regs: register block for this velocity
 *      @mask: CAM mask to load
 *
 *      Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam     -       set CAM data
 *      @regs: register block of this velocity
 *      @idx: CAM index
 *      @addr: 2 or 6 bytes of CAM data
 *
 *      Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{
        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
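
/*
 * Note on the CAM helpers above (a summary of the access pattern they all
 * share, as implemented here): the CAMCR page-select bits (CAMCR_PS1/PS0)
 * choose whether MARCAM[] accesses hit the mask page, the data page or the
 * ordinary multicast address registers; CAMADDR_CAMEN (plus CAMADDR_VCAMSL
 * for the VLAN CAM) gates the access and carries the entry index. Every
 * helper finishes by clearing CAMADDR and selecting the MAR page again, so
 * normal multicast hashing is unaffected between calls.
 */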

/**
 *      mac_wol_reset   -       reset WOL after exiting low power
 *      @regs: register block of this velocity
 *
 *      Called after we drop out of wake on lan mode in order to
 *      reset the Wake on lan features. This function doesn't restore
 *      the rest of the logic from the result of sleep/wakeup
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{
        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicate the rxfifo threshold is 128 bytes.
   1: indicate the rxfifo threshold is 512 bytes.
   2: indicate the rxfifo threshold is 1024 bytes.
   3: indicate the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte aligned
   0: indicate the IP header won't be DWORD byte aligned. (Default)
   1: indicate the IP header will be DWORD byte aligned.
      In some environments the IP header should be DWORD byte aligned,
      or the packet will be dropped when we receive it. (e.g. IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicate autonegotiation for both speed and duplex mode
   1: indicate 100Mbps half duplex mode
   2: indicate 100Mbps full duplex mode
   3: indicate 10Mbps half duplex mode
   4: indicate 10Mbps full duplex mode
   5: indicate 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
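
/*
 * Example (illustrative only): the per-unit options above can be set at
 * module load time, one value per adapter, e.g.
 *
 *      modprobe via-velocity RxDescriptors=128 TxDescriptors=128 rx_thresh=3
 *
 * Values outside the documented ranges are rejected by
 * velocity_set_int_opt() below and replaced with the defaults.
 */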

/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */
static const struct pci_device_id velocity_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);

/*
 *      Describe the OF device identifiers that we support in this
 *      device driver. Used for devicetree nodes.
 */
static const struct of_device_id velocity_of_ids[] = {
        { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, velocity_of_ids);

/**
 *      get_chip_name   -       identifier to name
 *      @chip_id: chip identifier
 *
 *      Given a chip identifier return a suitable description. Returns
 *      a pointer to a static string valid while the driver is loaded.
 */
static const char *get_chip_name(enum chip_type chip_id)
{
        int i;

        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 *      velocity_set_int_opt    -       parser for integer options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @min: lowest value allowed
 *      @max: highest value allowed
 *      @def: default value
 *      @name: property name
 *      @devname: device name
 *
 *      Set an integer property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
                                 char *name, const char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                                        devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                                        devname, name, val);
                *opt = val;
        }
}

/**
 *      velocity_set_bool_opt   -       parser for boolean options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @def: default value (yes/no)
 *      @flag: numeric value to set for true.
 *      @name: property name
 *      @devname: device name
 *
 *      Set a boolean property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
                                  char *name, const char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                        devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                        devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 *      velocity_get_options    -       set options on device
 *      @opts: option structure for the device
 *      @index: index of option to use in module options array
 *      @devname: device name
 *
 *      Turn the module and command options into a single structure
 *      for the current device
 */
static void velocity_get_options(struct velocity_opt *opts, int index,
                                 const char *devname)
{
        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
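
        /*
         * The hardware hands receive descriptors back in blocks of four
         * (see velocity_give_many_rx_descs()), so round the RX ring size
         * down to a multiple of 4.
         */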
        opts->numrx = (opts->numrx & ~3);
}

/**
 *      velocity_init_cam_filter        -       initialise CAM
 *      @vptr: velocity to program
 *
 *      Initialize the content addressable memory used for filters. Load
 *      appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        unsigned int vid, i = 0;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
                mac_set_vlan_cam(regs, i, (u8 *) &vid);
                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                if (++i >= VCAM_SIZE)
                        break;
        }
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}

static int velocity_vlan_rx_add_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static int velocity_vlan_rx_kill_vid(struct net_device *dev,
                                     __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *      velocity_rx_reset       -       handle a receive reset
 *      @vptr: velocity we are resetting
 *
 *      Reset the ownership and status for the receive ring side.
 *      Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         *      Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *      velocity_get_opt_media_mode     -       get media selection
 *      @vptr: velocity adapter
 *
 *      Get the media mode stored in EEPROM or module options and load
 *      mii_status accordingly. The requested link state information
 *      is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        case SPD_DPX_1000_FULL:
                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 *      safe_disable_mii_autopoll       -       autopoll off
 *      @regs: velocity registers
 *
 *      Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /* turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      enable_mii_autopoll     -       turn on autopolling
 *      @regs: velocity registers
 *
 *      Enable the MII link status autopoll feature on the Velocity
 *      hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      velocity_mii_read       -       read MII data
 *      @regs: velocity registers
 *      @index: MII register index
 *      @data: buffer for received data
 *
 *      Perform a single read of an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}
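
/*
 * Illustrative use of velocity_mii_read() (a sketch, not an actual call
 * site in this file; most callers go through the MII_REG_BITS_* helpers):
 *
 *      u16 bmsr;
 *
 *      if (velocity_mii_read(regs, MII_BMSR, &bmsr) == 0 &&
 *          (bmsr & BMSR_LSTATUS))
 *              link_is_up();   // hypothetical helper, for illustration
 */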

/**
 *      mii_check_media_mode    -       check media state
 *      @regs: velocity registers
 *
 *      Check the current MII status and determine the link status
 *      accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_mii_write      -       write MII data
 *      @regs: velocity registers
 *      @mii_addr: MII register index
 *      @data: 16bit data for the MII register
 *
 *      Perform a single write to an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 *      set_mii_flow_control    -       flow control setup
 *      @vptr: velocity interface
 *
 *      Set up the flow control on this interface according to
 *      the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
        }
}
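
/*
 * For reference (IEEE 802.3 Annex 28B pause advertisement encoding, as
 * applied above): advertising PAUSE_ASYM alone offers to *send* pause
 * frames, PAUSE_CAP together with PAUSE_ASYM asks to *receive* them,
 * PAUSE_CAP alone advertises symmetric flow control, and clearing both
 * bits disables the capability.
 */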

/**
 *      mii_set_auto_on         -       autonegotiate on
 *      @vptr: velocity
 *
 *      Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;

        PHYSR0 = readb(&regs->PHYSR0);

        /*
           if (!(PHYSR0 & PHYSR0_LINKGD))
           status|=VELOCITY_LINK_FAIL;
         */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_set_media_mode         -       set media mode
 *      @vptr: velocity adapter
 *      @mii_status: old MII link state
 *
 *      Check the media link state and configure the flow control
 *      PHY and also velocity hardware setup accordingly. In particular
 *      we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        u32 curr_status;
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        /*
           Check if new status is consistent with current status
           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
               (mii_status==curr_status)) {
           vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
           vptr->mii_status=check_connection_type(vptr->mac_regs);
           VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
           return 0;
           }
         */

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

        /*
         *      If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in half duplex mode
                 *    and enable it in full duplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);

                if (mii_status & VELOCITY_SPEED_1000)
                        CHIPGCR |= CHIPGCR_FCGMII;
                else
                        CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
                if ((mii_status & VELOCITY_SPEED_1000) &&
                    (mii_status & VELOCITY_DUPLEX_FULL)) {
                        CTRL1000 |= ADVERTISE_1000FULL;
                }
                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
                } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
                                ANAR |= ADVERTISE_10HALF;
                }
                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 *      velocity_print_link_status      -       link status reporting
 *      @vptr: velocity to report on
 *
 *      Turn the link status of the velocity card into a kernel log
 *      description of the new link state, detailing speed and duplex
 *      status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{
        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
                        break;
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                default:
                        break;
                }
        }
}

/**
 *      enable_flow_control_ability     -       flow control
 *      @vptr: velocity to configure
 *
 *      Set up flow control according to the flow control options
 *      determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }
}

/**
 *      velocity_soft_reset     -       soft reset
 *      @vptr: velocity to reset
 *
 *      Kick off a soft reset of the velocity adapter and then poll
 *      until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 *      velocity_set_multi      -       filter list change callback
 *      @dev: network device
 *
 *      Called by the network layer when the filter lists need to change
 *      for a velocity adapter. Reload the CAMs with the new address
 *      filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;

                mac_get_cam_mask(regs, vptr->mCAMmask);

                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
}
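
/*
 * Note: in the CAM-filtered branch above, multicast entries are packed
 * into the top of the 64-entry address CAM (offset = MCAM_SIZE -
 * multicast_limit), and mCAMmask keeps one valid bit per entry so the
 * whole mask can be reloaded in a single mac_set_cam_mask() call.
 */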

/*
 * MII access, media link mode setting functions
 */

/**
 *      mii_init        -       set up MII
 *      @vptr: velocity adapter
 *      @mii_status: link status
 *
 *      Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_ICPLUS_IP101A:
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
                                                MII_ADVERTISE, vptr->mac_regs);
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
                                                                vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
                                                                vptr->mac_regs);
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn it
                 *      off in NWay-forced half mode, for the NWay-forced vs.
                 *      legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn it
                 *      off in NWay-forced half mode, for the NWay-forced vs.
                 *      legacy-forced issue
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *      Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
        if (BMCR & BMCR_ISOLATE) {
                BMCR &= ~BMCR_ISOLATE;
                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers   -       Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupt during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}
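
/*
 * The CAMCR writes in setup_adaptive_interrupts() above are page selects:
 * ISRCTL is banked, and the CAMCR_PS bits choose whether a write to it
 * hits the TX or RX suppression threshold (page 0 selects the interrupt
 * hold timer). This summarizes the inline comments; see the VT6110
 * programming guide for the authoritative register layout.
 */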

/**
 *      velocity_init_registers -       initialise MAC registers
 *      @vptr: velocity to init
 *      @type: type of initialisation (hot or cold)
 *
 *      Initialise the MAC on a reset or on first set up on the
 *      hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        struct net_device *netdev = vptr->netdev;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(netdev);

                /*
                 *      Reset RX to keep the RX pointer on a 4X
                 *      (4-descriptor) boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(netdev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                                                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *      Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                if (!vptr->no_eeprom) {
                        mac_eeprom_reload(regs);
                        for (i = 0; i < 6; i++)
                                writeb(netdev->dev_addr[i], regs->PAR + i);
                }

                /*
                 *      clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *      Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *      Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(netdev);

                /*
                 *      Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(netdev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(netdev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);
        }
}
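
/*
 * Summary of the two paths in velocity_init_registers(): the RESET/WOL
 * cases only restart the receive machinery and renegotiate the link,
 * while the COLD case performs a full soft reset, reloads the station
 * address and CAM filters, reprograms the ring base registers, interrupt
 * suppression and masks, and then brings the link up.
 */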

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * RD number must be equal to 4X per hardware spec
         * (programming guide rev 1.20, p.13)
         */
        if (vptr->rx.filled < 4)
                return;

        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}
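
/*
 * Worked example for velocity_give_many_rx_descs(): with rx.filled == 10,
 * the 8 most recently refilled descriptors (10 & ~3) are flipped to
 * OWNED_BY_NIC and reported via RBRDU, while the remaining 2 (10 & 3)
 * stay counted in rx.filled until a later refill completes another
 * block of four.
 */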
1462
1463/**
1464 *      velocity_init_dma_rings -       set up DMA rings
1465 *      @vptr: Velocity to set up
1466 *
1467 *      Allocate PCI mapped DMA rings for the receive and transmit layer
1468 *      to use.
1469 */
1470static int velocity_init_dma_rings(struct velocity_info *vptr)
1471{
1472        struct velocity_opt *opt = &vptr->options;
1473        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1474        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1475        dma_addr_t pool_dma;
1476        void *pool;
1477        unsigned int i;
1478
1479        /*
1480         * Allocate all RD/TD rings in a single pool.
1481         *
1482         * dma_alloc_coherent() fulfills the requirement for 64 bytes
1483         * alignment
1484         */
1485        pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1486                                    rx_ring_size, &pool_dma, GFP_ATOMIC);
1487        if (!pool) {
1488                dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1489                        vptr->netdev->name);
1490                return -ENOMEM;
1491        }
1492
1493        vptr->rx.ring = pool;
1494        vptr->rx.pool_dma = pool_dma;
1495
1496        pool += rx_ring_size;
1497        pool_dma += rx_ring_size;
1498
1499        for (i = 0; i < vptr->tx.numq; i++) {
1500                vptr->tx.rings[i] = pool;
1501                vptr->tx.pool_dma[i] = pool_dma;
1502                pool += tx_ring_size;
1503                pool_dma += tx_ring_size;
1504        }
1505
1506        return 0;
1507}
1508
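/**
 *      velocity_set_rxbufsize  -       set the receive buffer size
 *      @vptr: velocity
 *      @mtu: MTU the buffers must accommodate
 *
 *      Use the default receive buffer size for a standard MTU,
 *      otherwise allow for the MTU plus 32 bytes of link layer
 *      overhead.
 */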
1509static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1510{
1511        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1512}
1513
1514/**
1515 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1516 *      @vptr: velocity
1517 *      @idx: ring index
1518 *
1519 *      Allocate a new full sized buffer for the reception of a frame and
1520 *      map it into PCI space for the hardware to use. The hardware
1521 *      requires *64* byte alignment of the buffer which makes life
1522 *      less fun than would be ideal.
1523 */
1524static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1525{
1526        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1527        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1528
1529        rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1530        if (rd_info->skb == NULL)
1531                return -ENOMEM;
1532
1533        /*
1534         *      Do the gymnastics to get the buffer head for data at
1535         *      64byte alignment.
1536         */
1537        skb_reserve(rd_info->skb,
1538                        64 - ((unsigned long) rd_info->skb->data & 63));
1539        rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1540                                        vptr->rx.buf_sz, DMA_FROM_DEVICE);
1541
1542        /*
1543         *      Fill in the descriptor to match
1544         */
1545
1546        *((u32 *)&(rd->rdesc0)) = 0;
1547        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1548        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1549        rd->pa_high = 0;
1550        return 0;
1551}
1552
1553
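/**
 *      velocity_rx_refill      -       replenish the receive ring
 *      @vptr: velocity
 *
 *      Walk the dirty part of the receive ring allocating a buffer
 *      for every slot that lacks one, stopping at the first
 *      descriptor still owned by the NIC or on allocation failure.
 *      Returns the number of descriptors refilled.
 */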
1554static int velocity_rx_refill(struct velocity_info *vptr)
1555{
1556        int dirty = vptr->rx.dirty, done = 0;
1557
1558        do {
1559                struct rx_desc *rd = vptr->rx.ring + dirty;
1560
1561                /* Fine for an all zero Rx desc at init time as well */
1562                if (rd->rdesc0.len & OWNED_BY_NIC)
1563                        break;
1564
1565                if (!vptr->rx.info[dirty].skb) {
1566                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1567                                break;
1568                }
1569                done++;
1570                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1571        } while (dirty != vptr->rx.curr);
1572
1573        if (done) {
1574                vptr->rx.dirty = dirty;
1575                vptr->rx.filled += done;
1576        }
1577
1578        return done;
1579}
1580
1581/**
1582 *      velocity_free_rd_ring   -       free receive ring
1583 *      @vptr: velocity to clean up
1584 *
1585 *      Free the receive buffers for each ring slot and any
1586 *      attached socket buffers that need to go away.
1587 */
1588static void velocity_free_rd_ring(struct velocity_info *vptr)
1589{
1590        int i;
1591
1592        if (vptr->rx.info == NULL)
1593                return;
1594
1595        for (i = 0; i < vptr->options.numrx; i++) {
1596                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1597                struct rx_desc *rd = vptr->rx.ring + i;
1598
1599                memset(rd, 0, sizeof(*rd));
1600
1601                if (!rd_info->skb)
1602                        continue;
1603                dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1604                                 DMA_FROM_DEVICE);
1605                rd_info->skb_dma = 0;
1606
1607                dev_kfree_skb(rd_info->skb);
1608                rd_info->skb = NULL;
1609        }
1610
1611        kfree(vptr->rx.info);
1612        vptr->rx.info = NULL;
1613}
1614
1615/**
1616 *      velocity_init_rd_ring   -       set up receive ring
1617 *      @vptr: velocity to configure
1618 *
1619 *      Allocate and set up the receive buffers for each ring slot and
1620 *      assign them to the network adapter.
1621 */
1622static int velocity_init_rd_ring(struct velocity_info *vptr)
1623{
1624        int ret = -ENOMEM;
1625
1626        vptr->rx.info = kcalloc(vptr->options.numrx,
1627                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1628        if (!vptr->rx.info)
1629                goto out;
1630
1631        velocity_init_rx_ring_indexes(vptr);
1632
1633        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1634                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1635                        "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
1636                velocity_free_rd_ring(vptr);
1637                goto out;
1638        }
1639
1640        ret = 0;
1641out:
1642        return ret;
1643}
1644
1645/**
1646 *      velocity_init_td_ring   -       set up transmit ring
1647 *      @vptr:  velocity
1648 *
1649 *      Set up the transmit ring and chain the ring pointers together.
1650 *      Returns zero on success or a negative posix errno code for
1651 *      failure.
1652 */
1653static int velocity_init_td_ring(struct velocity_info *vptr)
1654{
1655        int j;
1656
1657        /* Init the TD ring entries */
1658        for (j = 0; j < vptr->tx.numq; j++) {
1659
1660                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1661                                            sizeof(struct velocity_td_info),
1662                                            GFP_KERNEL);
1663                if (!vptr->tx.infos[j]) {
1664                        while (--j >= 0)
1665                                kfree(vptr->tx.infos[j]);
1666                        return -ENOMEM;
1667                }
1668
1669                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1670        }
1671        return 0;
1672}
1673
1674/**
1675 *      velocity_free_dma_rings -       free PCI ring pointers
1676 *      @vptr: Velocity to free from
1677 *
1678 *      Clean up the PCI ring buffers allocated to this velocity.
1679 */
1680static void velocity_free_dma_rings(struct velocity_info *vptr)
1681{
1682        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1683                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1684
1685        dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1686}
1687
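/**
 *      velocity_init_rings     -       set up all rings
 *      @vptr: velocity
 *      @mtu: MTU for which the receive buffers are sized
 *
 *      Allocate the descriptor pool and then set up the receive and
 *      transmit rings, unwinding on failure. Returns zero on success
 *      or a negative errno code.
 */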
1688static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1689{
1690        int ret;
1691
1692        velocity_set_rxbufsize(vptr, mtu);
1693
1694        ret = velocity_init_dma_rings(vptr);
1695        if (ret < 0)
1696                goto out;
1697
1698        ret = velocity_init_rd_ring(vptr);
1699        if (ret < 0)
1700                goto err_free_dma_rings_0;
1701
1702        ret = velocity_init_td_ring(vptr);
1703        if (ret < 0)
1704                goto err_free_rd_ring_1;
1705out:
1706        return ret;
1707
1708err_free_rd_ring_1:
1709        velocity_free_rd_ring(vptr);
1710err_free_dma_rings_0:
1711        velocity_free_dma_rings(vptr);
1712        goto out;
1713}
1714
1715/**
1716 *      velocity_free_tx_buf    -       free transmit buffer
1717 *      @vptr: velocity
1718 *      @tdinfo: buffer info
 *      @td: transmit descriptor the buffer belongs to
1719 *
1720 *      Release a transmit buffer. If the buffer was preallocated then
1721 *      recycle it, if not then unmap the buffer.
1722 */
1723static void velocity_free_tx_buf(struct velocity_info *vptr,
1724                struct velocity_td_info *tdinfo, struct tx_desc *td)
1725{
1726        struct sk_buff *skb = tdinfo->skb;
1727        int i;
1728
1729        /*
1730         *      Don't unmap the pre-allocated tx_bufs
1731         */
1732        for (i = 0; i < tdinfo->nskb_dma; i++) {
1733                size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1734
1735                /* For scatter-gather */
1736                if (skb_shinfo(skb)->nr_frags > 0)
1737                        pktlen = max_t(size_t, pktlen,
1738                                       le16_to_cpu(td->td_buf[i].size) & ~TD_QUEUE);
1739
1740                dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1741                                 pktlen, DMA_TO_DEVICE);
1742        }
1743        dev_kfree_skb_irq(skb);
1744        tdinfo->skb = NULL;
1745}
1746
1747/*
1748 *      FIXME: could we merge this with velocity_free_tx_buf ?
1749 */
1750static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1751                                                         int q, int n)
1752{
1753        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1754        int i;
1755
1756        if (td_info == NULL)
1757                return;
1758
1759        if (td_info->skb) {
1760                for (i = 0; i < td_info->nskb_dma; i++) {
1761                        if (td_info->skb_dma[i]) {
1762                                dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1763                                        td_info->skb->len, DMA_TO_DEVICE);
1764                                td_info->skb_dma[i] = 0;
1765                        }
1766                }
1767                dev_kfree_skb(td_info->skb);
1768                td_info->skb = NULL;
1769        }
1770}
1771
1772/**
1773 *      velocity_free_td_ring   -       free td ring
1774 *      @vptr: velocity
1775 *
1776 *      Free up the transmit ring for this particular velocity adapter.
1777 *      We free the ring contents but not the ring itself.
1778 */
1779static void velocity_free_td_ring(struct velocity_info *vptr)
1780{
1781        int i, j;
1782
1783        for (j = 0; j < vptr->tx.numq; j++) {
1784                if (vptr->tx.infos[j] == NULL)
1785                        continue;
1786                for (i = 0; i < vptr->options.numtx; i++)
1787                        velocity_free_td_ring_entry(vptr, j, i);
1788
1789                kfree(vptr->tx.infos[j]);
1790                vptr->tx.infos[j] = NULL;
1791        }
1792}
1793
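/**
 *      velocity_free_rings     -       free all rings
 *      @vptr: velocity
 *
 *      Release the transmit and receive ring contents and then free
 *      the underlying descriptor pool.
 */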
1794static void velocity_free_rings(struct velocity_info *vptr)
1795{
1796        velocity_free_td_ring(vptr);
1797        velocity_free_rd_ring(vptr);
1798        velocity_free_dma_rings(vptr);
1799}
1800
1801/**
1802 *      velocity_error  -       handle error from controller
1803 *      @vptr: velocity
1804 *      @status: card status
1805 *
1806 *      Process an error report from the hardware and attempt to recover
1807 *      the card itself. At the moment we cannot recover from some
1808 *      theoretically impossible errors but this could be fixed using
1809 *      the pci_device_failed logic to bounce the hardware
1810 *
1811 */
1812static void velocity_error(struct velocity_info *vptr, int status)
1813{
1814
1815        if (status & ISR_TXSTLI) {
1816                struct mac_regs __iomem *regs = vptr->mac_regs;
1817
1818                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1819                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1820                writew(TRDCSR_RUN, &regs->TDCSRClr);
1821                netif_stop_queue(vptr->netdev);
1822
1823                /* FIXME: port over the pci_device_failed code and use it
1824                   here */
1825        }
1826
1827        if (status & ISR_SRCI) {
1828                struct mac_regs __iomem *regs = vptr->mac_regs;
1829                int linked;
1830
1831                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1832                        vptr->mii_status = check_connection_type(regs);
1833
1834                        /*
1835                         *      If it is a 3119, disable frame bursting in
1836                         *      half duplex mode and enable it in full
1837                         *      duplex mode
1838                         */
1839                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1840                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1841                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1842                                else
1843                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1844                        }
1845                        /*
1846                         *      Only enable CD heart beat counter in 10HD mode
1847                         */
1848                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1849                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1850                        else
1851                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1852
1853                        setup_queue_timers(vptr);
1854                }
1855                /*
1856                 *      Get link status from PHYSR0
1857                 */
1858                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1859
1860                if (linked) {
1861                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1862                        netif_carrier_on(vptr->netdev);
1863                } else {
1864                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1865                        netif_carrier_off(vptr->netdev);
1866                }
1867
1868                velocity_print_link_status(vptr);
1869                enable_flow_control_ability(vptr);
1870
1871                /*
1872                 *      Re-enable auto-polling because SRCI will disable
1873                 *      auto-polling
1874                 */
1875
1876                enable_mii_autopoll(regs);
1877
1878                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1879                        netif_stop_queue(vptr->netdev);
1880                else
1881                        netif_wake_queue(vptr->netdev);
1882
1883        }
1884        if (status & ISR_MIBFI)
1885                velocity_update_hw_mibs(vptr);
1886        if (status & ISR_LSTEI)
1887                mac_rx_queue_wake(vptr->mac_regs);
1888}
1889
1890/**
1891 *      velocity_tx_srv         -       transmit interrupt service
1892 *      @vptr: velocity
1893 *
1894 *      Scan the queues looking for transmitted packets that
1895 *      we can complete and clean up. Update any statistics as
1896 *      necessary.
1897 */
1898static int velocity_tx_srv(struct velocity_info *vptr)
1899{
1900        struct tx_desc *td;
1901        int qnum;
1902        int full = 0;
1903        int idx;
1904        int works = 0;
1905        struct velocity_td_info *tdinfo;
1906        struct net_device_stats *stats = &vptr->netdev->stats;
1907
1908        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1909                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1910                        idx = (idx + 1) % vptr->options.numtx) {
1911
1912                        /*
1913                         *      Get Tx Descriptor
1914                         */
1915                        td = &(vptr->tx.rings[qnum][idx]);
1916                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1917
1918                        if (td->tdesc0.len & OWNED_BY_NIC)
1919                                break;
1920
1921                        if (works++ > 15)
1922                                break;
1923
1924                        if (td->tdesc0.TSR & TSR0_TERR) {
1925                                stats->tx_errors++;
1926                                stats->tx_dropped++;
1927                                if (td->tdesc0.TSR & TSR0_CDH)
1928                                        stats->tx_heartbeat_errors++;
1929                                if (td->tdesc0.TSR & TSR0_CRS)
1930                                        stats->tx_carrier_errors++;
1931                                if (td->tdesc0.TSR & TSR0_ABT)
1932                                        stats->tx_aborted_errors++;
1933                                if (td->tdesc0.TSR & TSR0_OWC)
1934                                        stats->tx_window_errors++;
1935                        } else {
1936                                stats->tx_packets++;
1937                                stats->tx_bytes += tdinfo->skb->len;
1938                        }
1939                        velocity_free_tx_buf(vptr, tdinfo, td);
1940                        vptr->tx.used[qnum]--;
1941                }
1942                vptr->tx.tail[qnum] = idx;
1943
1944                if (AVAIL_TD(vptr, qnum) < 1)
1945                        full = 1;
1946        }
1947        /*
1948         *      Look to see if we should kick the transmit network
1949         *      layer for more work.
1950         */
1951        if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1952            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1953                netif_wake_queue(vptr->netdev);
1954        }
1955        return works;
1956}
1957
1958/**
1959 *      velocity_rx_csum        -       checksum process
1960 *      @rd: receive packet descriptor
1961 *      @skb: network layer packet buffer
1962 *
1963 *      Process the status bits for the received packet and determine
1964 *      if the checksum was computed and verified by the hardware
1965 */
1966static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1967{
1968        skb_checksum_none_assert(skb);
1969
1970        if (rd->rdesc1.CSM & CSM_IPKT) {
1971                if (rd->rdesc1.CSM & CSM_IPOK) {
1972                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1973                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1974                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1975                                        return;
1976                        }
1977                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1978                }
1979        }
1980}
1981
1982/**
1983 *      velocity_rx_copy        -       in place Rx copy for small packets
1984 *      @rx_skb: network layer packet buffer candidate
1985 *      @pkt_size: received data size
1986 *      @vptr: velocity adapter
1988 *
1989 *      Replace the current skb that is scheduled for Rx processing by a
1990 *      shorter, immediately allocated skb, if the received packet is small
1991 *      enough. This function returns a negative value if the received
1992 *      packet is too big or if memory is exhausted.
1993 */
1994static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1995                            struct velocity_info *vptr)
1996{
1997        int ret = -1;
1998        if (pkt_size < rx_copybreak) {
1999                struct sk_buff *new_skb;
2000
2001                new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2002                if (new_skb) {
2003                        new_skb->ip_summed = rx_skb[0]->ip_summed;
2004                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2005                        *rx_skb = new_skb;
2006                        ret = 0;
2007                }
2008
2009        }
2010        return ret;
2011}
2012
2013/**
2014 *      velocity_iph_realign    -       IP header alignment
2015 *      @vptr: velocity we are handling
2016 *      @skb: network layer packet buffer
2017 *      @pkt_size: received data size
2018 *
2019 *      Align IP header on a 2 byte boundary. This behavior can be
2020 *      configured by the user.
2021 */
2022static inline void velocity_iph_realign(struct velocity_info *vptr,
2023                                        struct sk_buff *skb, int pkt_size)
2024{
2025        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2026                memmove(skb->data + 2, skb->data, pkt_size);
2027                skb_reserve(skb, 2);
2028        }
2029}
2030
2031/**
2032 *      velocity_receive_frame  -       received packet processor
2033 *      @vptr: velocity we are handling
2034 *      @idx: ring index
2035 *
2036 *      A packet has arrived. We process the packet and if appropriate
2037 *      pass the frame up the network stack
2038 */
2039static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2040{
2041        struct net_device_stats *stats = &vptr->netdev->stats;
2042        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2043        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2044        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2045        struct sk_buff *skb;
2046
2047        if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2048                if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2049                        VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2050                stats->rx_length_errors++;
2051                return -EINVAL;
2052        }
2053
2054        if (rd->rdesc0.RSR & RSR_MAR)
2055                stats->multicast++;
2056
2057        skb = rd_info->skb;
2058
2059        dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2060                                    vptr->rx.buf_sz, DMA_FROM_DEVICE);
2061
2062        velocity_rx_csum(rd, skb);
2063
2064        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2065                velocity_iph_realign(vptr, skb, pkt_len);
2066                rd_info->skb = NULL;
2067                dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2068                                 DMA_FROM_DEVICE);
2069        } else {
2070                dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2071                                           vptr->rx.buf_sz, DMA_FROM_DEVICE);
2072        }
2073
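        /* The length reported by the hardware includes the 4 byte FCS */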
2074        skb_put(skb, pkt_len - 4);
2075        skb->protocol = eth_type_trans(skb, vptr->netdev);
2076
2077        if (rd->rdesc0.RSR & RSR_DETAG) {
2078                u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2079
2080                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2081        }
2082        netif_receive_skb(skb);
2083
2084        stats->rx_bytes += pkt_len;
2085        stats->rx_packets++;
2086
2087        return 0;
2088}
2089
2090/**
2091 *      velocity_rx_srv         -       service RX interrupt
2092 *      @vptr: velocity
2093 *
2094 *      Walk the receive ring of the velocity adapter and remove
2095 *      any received packets from the receive queue. Hand the ring
2096 *      slots back to the adapter for reuse.
2097 */
2098static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2099{
2100        struct net_device_stats *stats = &vptr->netdev->stats;
2101        int rd_curr = vptr->rx.curr;
2102        int works = 0;
2103
2104        while (works < budget_left) {
2105                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2106
2107                if (!vptr->rx.info[rd_curr].skb)
2108                        break;
2109
2110                if (rd->rdesc0.len & OWNED_BY_NIC)
2111                        break;
2112
2113                rmb();
2114
2115                /*
2116                 *      Don't drop CE or RL error frames even though RXOK is off
2117                 */
2118                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2119                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2120                                stats->rx_dropped++;
2121                } else {
2122                        if (rd->rdesc0.RSR & RSR_CRC)
2123                                stats->rx_crc_errors++;
2124                        if (rd->rdesc0.RSR & RSR_FAE)
2125                                stats->rx_frame_errors++;
2126
2127                        stats->rx_dropped++;
2128                }
2129
2130                rd->size |= RX_INTEN;
2131
2132                rd_curr++;
2133                if (rd_curr >= vptr->options.numrx)
2134                        rd_curr = 0;
2135                works++;
2136        }
2137
2138        vptr->rx.curr = rd_curr;
2139
2140        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2141                velocity_give_many_rx_descs(vptr);
2142
2143        VAR_USED(stats);
2144        return works;
2145}
2146
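/**
 *      velocity_poll   -       NAPI polling callback
 *      @napi: NAPI context for this adapter
 *      @budget: maximum number of receive packets to process
 *
 *      Service the receive ring up to the NAPI budget, complete any
 *      finished transmits and, once the budget is no longer being
 *      consumed, leave polling mode and re-enable the interrupt.
 */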
2147static int velocity_poll(struct napi_struct *napi, int budget)
2148{
2149        struct velocity_info *vptr = container_of(napi,
2150                        struct velocity_info, napi);
2151        unsigned int rx_done;
2152        unsigned long flags;
2153
2154        /*
2155         * Rx is serviced outside vptr->lock because netif_receive_skb()
2156         * must not be called with interrupts disabled.
2157         */
2158        rx_done = velocity_rx_srv(vptr, budget);
2159        spin_lock_irqsave(&vptr->lock, flags);
2160        velocity_tx_srv(vptr);
2161        /* If budget not fully consumed, exit the polling mode */
2162        if (rx_done < budget) {
2163                napi_complete_done(napi, rx_done);
2164                mac_enable_int(vptr->mac_regs);
2165        }
2166        spin_unlock_irqrestore(&vptr->lock, flags);
2167
2168        return rx_done;
2169}
2170
2171/**
2172 *      velocity_intr           -       interrupt callback
2173 *      @irq: interrupt number
2174 *      @dev_instance: interrupting device
2175 *
2176 *      Called whenever an interrupt is generated by the velocity
2177 *      adapter IRQ line. We may not be the source of the interrupt
2178 *      and need to identify initially if we are, and if not exit as
2179 *      efficiently as possible.
2180 */
2181static irqreturn_t velocity_intr(int irq, void *dev_instance)
2182{
2183        struct net_device *dev = dev_instance;
2184        struct velocity_info *vptr = netdev_priv(dev);
2185        u32 isr_status;
2186
2187        spin_lock(&vptr->lock);
2188        isr_status = mac_read_isr(vptr->mac_regs);
2189
2190        /* Not us ? */
2191        if (isr_status == 0) {
2192                spin_unlock(&vptr->lock);
2193                return IRQ_NONE;
2194        }
2195
2196        /* Ack the interrupt */
2197        mac_write_isr(vptr->mac_regs, isr_status);
2198
2199        if (likely(napi_schedule_prep(&vptr->napi))) {
2200                mac_disable_int(vptr->mac_regs);
2201                __napi_schedule(&vptr->napi);
2202        }
2203
2204        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2205                velocity_error(vptr, isr_status);
2206
2207        spin_unlock(&vptr->lock);
2208
2209        return IRQ_HANDLED;
2210}
2211
2212/**
2213 *      velocity_open           -       interface activation callback
2214 *      @dev: network layer device to open
2215 *
2216 *      Called when the network layer brings the interface up. Returns
2217 *      a negative posix error code on failure, or zero on success.
2218 *
2219 *      All the ring allocation and set up is done on open for this
2220 *      adapter to minimise memory usage when inactive
2221 */
2222static int velocity_open(struct net_device *dev)
2223{
2224        struct velocity_info *vptr = netdev_priv(dev);
2225        int ret;
2226
2227        ret = velocity_init_rings(vptr, dev->mtu);
2228        if (ret < 0)
2229                goto out;
2230
2231        /* Ensure chip is running */
2232        velocity_set_power_state(vptr, PCI_D0);
2233
2234        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2235
2236        ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2237                          dev->name, dev);
2238        if (ret < 0) {
2239                /* Power down the chip */
2240                velocity_set_power_state(vptr, PCI_D3hot);
2241                velocity_free_rings(vptr);
2242                goto out;
2243        }
2244
2245        velocity_give_many_rx_descs(vptr);
2246
2247        mac_enable_int(vptr->mac_regs);
2248        netif_start_queue(dev);
2249        napi_enable(&vptr->napi);
2250        vptr->flags |= VELOCITY_FLAGS_OPENED;
2251out:
2252        return ret;
2253}
2254
2255/**
2256 *      velocity_shutdown       -       shut down the chip
2257 *      @vptr: velocity to deactivate
2258 *
2259 *      Shuts down the internal operations of the velocity and
2260 *      disables interrupts, autopolling, transmit and receive
2261 */
2262static void velocity_shutdown(struct velocity_info *vptr)
2263{
2264        struct mac_regs __iomem *regs = vptr->mac_regs;
2265        mac_disable_int(regs);
2266        writel(CR0_STOP, &regs->CR0Set);
2267        writew(0xFFFF, &regs->TDCSRClr);
2268        writeb(0xFF, &regs->RDCSRClr);
2269        safe_disable_mii_autopoll(regs);
2270        mac_clear_isr(regs);
2271}
2272
2273/**
2274 *      velocity_change_mtu     -       MTU change callback
2275 *      @dev: network device
2276 *      @new_mtu: desired MTU
2277 *
2278 *      Handle requests from the networking layer for MTU change on
2279 *      this interface. It gets called on a change by the network layer.
2280 *      Return zero for success or negative posix error code.
2281 */
2282static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2283{
2284        struct velocity_info *vptr = netdev_priv(dev);
2285        int ret = 0;
2286
2287        if (!netif_running(dev)) {
2288                dev->mtu = new_mtu;
2289                goto out_0;
2290        }
2291
2292        if (dev->mtu != new_mtu) {
2293                struct velocity_info *tmp_vptr;
2294                unsigned long flags;
2295                struct rx_info rx;
2296                struct tx_info tx;
2297
2298                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2299                if (!tmp_vptr) {
2300                        ret = -ENOMEM;
2301                        goto out_0;
2302                }
2303
2304                tmp_vptr->netdev = dev;
2305                tmp_vptr->pdev = vptr->pdev;
2306                tmp_vptr->dev = vptr->dev;
2307                tmp_vptr->options = vptr->options;
2308                tmp_vptr->tx.numq = vptr->tx.numq;
2309
2310                ret = velocity_init_rings(tmp_vptr, new_mtu);
2311                if (ret < 0)
2312                        goto out_free_tmp_vptr_1;
2313
2314                napi_disable(&vptr->napi);
2315
2316                spin_lock_irqsave(&vptr->lock, flags);
2317
2318                netif_stop_queue(dev);
2319                velocity_shutdown(vptr);
2320
2321                rx = vptr->rx;
2322                tx = vptr->tx;
2323
2324                vptr->rx = tmp_vptr->rx;
2325                vptr->tx = tmp_vptr->tx;
2326
2327                tmp_vptr->rx = rx;
2328                tmp_vptr->tx = tx;
2329
2330                dev->mtu = new_mtu;
2331
2332                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2333
2334                velocity_give_many_rx_descs(vptr);
2335
2336                napi_enable(&vptr->napi);
2337
2338                mac_enable_int(vptr->mac_regs);
2339                netif_start_queue(dev);
2340
2341                spin_unlock_irqrestore(&vptr->lock, flags);
2342
2343                velocity_free_rings(tmp_vptr);
2344
2345out_free_tmp_vptr_1:
2346                kfree(tmp_vptr);
2347        }
2348out_0:
2349        return ret;
2350}
2351
2352#ifdef CONFIG_NET_POLL_CONTROLLER
2353/**
2354 *  velocity_poll_controller            -       Velocity Poll controller function
2355 *  @dev: network device
2356 *
2357 *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
2359 *  with interrupts disabled.
2360 */
2361static void velocity_poll_controller(struct net_device *dev)
2362{
2363        disable_irq(dev->irq);
2364        velocity_intr(dev->irq, dev);
2365        enable_irq(dev->irq);
2366}
2367#endif
2368
2369/**
2370 *      velocity_mii_ioctl              -       MII ioctl handler
2371 *      @dev: network device
2372 *      @ifr: the ifreq block for the ioctl
2373 *      @cmd: the command
2374 *
2375 *      Process MII requests made via ioctl from the network layer. These
2376 *      are used by tools like kudzu to interrogate the link state of the
2377 *      hardware
2378 */
2379static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2380{
2381        struct velocity_info *vptr = netdev_priv(dev);
2382        struct mac_regs __iomem *regs = vptr->mac_regs;
2383        unsigned long flags;
2384        struct mii_ioctl_data *miidata = if_mii(ifr);
2385        int err;
2386
2387        switch (cmd) {
2388        case SIOCGMIIPHY:
2389                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2390                break;
2391        case SIOCGMIIREG:
2392                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2393                        return -ETIMEDOUT;
2394                break;
2395        case SIOCSMIIREG:
2396                spin_lock_irqsave(&vptr->lock, flags);
2397                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2398                spin_unlock_irqrestore(&vptr->lock, flags);
2399                check_connection_type(vptr->mac_regs);
2400                if (err)
2401                        return err;
2402                break;
2403        default:
2404                return -EOPNOTSUPP;
2405        }
2406        return 0;
2407}
2408
2409/**
2410 *      velocity_ioctl          -       ioctl entry point
2411 *      @dev: network device
2412 *      @rq: interface request ioctl
2413 *      @cmd: command code
2414 *
2415 *      Called when the user issues an ioctl request to the network
2416 *      device in question. The velocity interface supports MII.
2417 */
2418static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2419{
2420        struct velocity_info *vptr = netdev_priv(dev);
2421        int ret;
2422
2423        /* If we are asked for information and the device is power
2424           saving then we need to bring the device back up to talk to it */
2425
2426        if (!netif_running(dev))
2427                velocity_set_power_state(vptr, PCI_D0);
2428
2429        switch (cmd) {
2430        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2431        case SIOCGMIIREG:       /* Read MII PHY register. */
2432        case SIOCSMIIREG:       /* Write to MII PHY register. */
2433                ret = velocity_mii_ioctl(dev, rq, cmd);
2434                break;
2435
2436        default:
2437                ret = -EOPNOTSUPP;
2438        }
2439        if (!netif_running(dev))
2440                velocity_set_power_state(vptr, PCI_D3hot);
2441
2442
2443        return ret;
2444}
2445
2446/**
2447 *      velocity_get_stats      -       statistics callback
2448 *      @dev: network device
2449 *
2450 *      Callback from the network layer to allow driver statistics
2451 *      to be resynchronized with hardware collected state. In the
2452 *      case of the velocity we need to pull the MIB counters from
2453 *      the hardware into the counters before letting the network
2454 *      layer display them.
2455 */
2456static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2457{
2458        struct velocity_info *vptr = netdev_priv(dev);
2459
2460        /* If the hardware is down, don't touch the hardware counters */
2461        if (!netif_running(dev))
2462                return &dev->stats;
2463
2464        spin_lock_irq(&vptr->lock);
2465        velocity_update_hw_mibs(vptr);
2466        spin_unlock_irq(&vptr->lock);
2467
2468        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2469        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2470        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2471
2472        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2473        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2474
2475        /*
2476         *      The remaining net_device_stats fields (rx_dropped,
2477         *      rx_over_errors, rx_frame_errors, rx_fifo_errors,
2478         *      rx_missed_errors and tx_fifo_errors) have no hardware
2479         *      MIB equivalent and are maintained by the driver itself.
2480         */
2485        return &dev->stats;
2486}
2487
2488/**
2489 *      velocity_close          -       close adapter callback
2490 *      @dev: network device
2491 *
2492 *      Callback from the network layer when the velocity is being
2493 *      deactivated by the network layer
2494 */
2495static int velocity_close(struct net_device *dev)
2496{
2497        struct velocity_info *vptr = netdev_priv(dev);
2498
2499        napi_disable(&vptr->napi);
2500        netif_stop_queue(dev);
2501        velocity_shutdown(vptr);
2502
2503        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2504                velocity_get_ip(vptr);
2505
2506        free_irq(dev->irq, dev);
2507
2508        velocity_free_rings(vptr);
2509
2510        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2511        return 0;
2512}
2513
2514/**
2515 *      velocity_xmit           -       transmit packet callback
2516 *      @skb: buffer to transmit
2517 *      @dev: network device
2518 *
2519 *      Called by the network layer to request that a packet be queued to
2520 *      the velocity. Returns zero on success.
2521 */
2522static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2523                                 struct net_device *dev)
2524{
2525        struct velocity_info *vptr = netdev_priv(dev);
2526        int qnum = 0;
2527        struct tx_desc *td_ptr;
2528        struct velocity_td_info *tdinfo;
2529        unsigned long flags;
2530        int pktlen;
2531        int index, prev;
2532        int i = 0;
2533
2534        if (skb_padto(skb, ETH_ZLEN))
2535                goto out;
2536
2537        /* The hardware can handle at most 7 memory segments, so
2538         * linearize the skb if there are more */
2539        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2540                dev_kfree_skb_any(skb);
2541                return NETDEV_TX_OK;
2542        }
2543
2544        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2545                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2546                                skb_headlen(skb);
2547
2548        spin_lock_irqsave(&vptr->lock, flags);
2549
2550        index = vptr->tx.curr[qnum];
2551        td_ptr = &(vptr->tx.rings[qnum][index]);
2552        tdinfo = &(vptr->tx.infos[qnum][index]);
2553
2554        td_ptr->tdesc1.TCR = TCR0_TIC;
2555        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2556
2557        /*
2558         *      Map the linear network buffer into PCI space and
2559         *      add it to the transmit ring.
2560         */
2561        tdinfo->skb = skb;
2562        tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2563                                                                DMA_TO_DEVICE);
2564        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2565        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2566        td_ptr->td_buf[0].pa_high = 0;
2567        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2568
2569        /* Handle fragments */
2570        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2572
2573                tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2574                                                          frag, 0,
2575                                                          skb_frag_size(frag),
2576                                                          DMA_TO_DEVICE);
2577
2578                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2579                td_ptr->td_buf[i + 1].pa_high = 0;
2580                td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2581        }
2582        tdinfo->nskb_dma = i + 1;
2583
2584        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2585
2586        if (skb_vlan_tag_present(skb)) {
2587                td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2588                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2589        }
2590
2591        /*
2592         *      Handle hardware checksum
2593         */
2594        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2595                const struct iphdr *ip = ip_hdr(skb);
2596                if (ip->protocol == IPPROTO_TCP)
2597                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2598                else if (ip->protocol == IPPROTO_UDP)
2599                        td_ptr->tdesc1.TCR |= TCR0_UDPCK;
2600                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2601        }
2602
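        /*
         *      Hand the descriptor over to the NIC: mark it owned, set
         *      the TD_QUEUE bit in the previous descriptor to chain it
         *      in, and kick the transmit queue.
         */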
2603        prev = index - 1;
2604        if (prev < 0)
2605                prev = vptr->options.numtx - 1;
2606        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2607        vptr->tx.used[qnum]++;
2608        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2609
2610        if (AVAIL_TD(vptr, qnum) < 1)
2611                netif_stop_queue(dev);
2612
2613        td_ptr = &(vptr->tx.rings[qnum][prev]);
2614        td_ptr->td_buf[0].size |= TD_QUEUE;
2615        mac_tx_queue_wake(vptr->mac_regs, qnum);
2616
2617        spin_unlock_irqrestore(&vptr->lock, flags);
2618out:
2619        return NETDEV_TX_OK;
2620}
2621
2622static const struct net_device_ops velocity_netdev_ops = {
2623        .ndo_open               = velocity_open,
2624        .ndo_stop               = velocity_close,
2625        .ndo_start_xmit         = velocity_xmit,
2626        .ndo_get_stats          = velocity_get_stats,
2627        .ndo_validate_addr      = eth_validate_addr,
2628        .ndo_set_mac_address    = eth_mac_addr,
2629        .ndo_set_rx_mode        = velocity_set_multi,
2630        .ndo_change_mtu         = velocity_change_mtu,
2631        .ndo_do_ioctl           = velocity_ioctl,
2632        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2633        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2634#ifdef CONFIG_NET_POLL_CONTROLLER
2635        .ndo_poll_controller = velocity_poll_controller,
2636#endif
2637};
2638
2639/**
2640 *      velocity_init_info      -       init private data
2642 *      @vptr: Velocity info
2643 *      @info: Board type
2644 *
2645 *      Set up the initial velocity_info struct for the device that has been
2646 *      discovered.
2647 */
2648static void velocity_init_info(struct velocity_info *vptr,
2649                                const struct velocity_info_tbl *info)
2650{
2651        vptr->chip_id = info->chip_id;
2652        vptr->tx.numq = info->txqueue;
2653        vptr->multicast_limit = MCAM_SIZE;
2654        spin_lock_init(&vptr->lock);
2655}
2656
2657/**
2658 *      velocity_get_pci_info   -       retrieve PCI info for device
2659 *      @vptr: velocity device
2661 *
2662 *      Retrieve the PCI configuration space data that interests us from
2663 *      the kernel PCI layer
2664 */
2665static int velocity_get_pci_info(struct velocity_info *vptr)
2666{
2667        struct pci_dev *pdev = vptr->pdev;
2668
2669        pci_set_master(pdev);
2670
2671        vptr->ioaddr = pci_resource_start(pdev, 0);
2672        vptr->memaddr = pci_resource_start(pdev, 1);
2673
2674        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2675                dev_err(&pdev->dev,
2676                           "region #0 is not an I/O resource, aborting.\n");
2677                return -EINVAL;
2678        }
2679
2680        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2681                dev_err(&pdev->dev,
2682                           "region #1 is an I/O resource, aborting.\n");
2683                return -EINVAL;
2684        }
2685
2686        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2687                dev_err(&pdev->dev, "region #1 is too small.\n");
2688                return -EINVAL;
2689        }
2690
2691        return 0;
2692}
2693
2694/**
2695 *      velocity_get_platform_info - retrieve platform info for device
2696 *      @vptr: velocity device
2698 *
2699 *      Retrieve the Platform configuration data that interests us
2700 */
2701static int velocity_get_platform_info(struct velocity_info *vptr)
2702{
2703        struct resource res;
2704        int ret;
2705
2706        if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2707                vptr->no_eeprom = 1;
2708
2709        ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2710        if (ret) {
2711                dev_err(vptr->dev, "unable to find memory address\n");
2712                return ret;
2713        }
2714
2715        vptr->memaddr = res.start;
2716
2717        if (resource_size(&res) < VELOCITY_IO_SIZE) {
2718                dev_err(vptr->dev, "memory region is too small.\n");
2719                return -EINVAL;
2720        }
2721
2722        return 0;
2723}
2724
2725/**
2726 *      velocity_print_info     -       per driver data
2727 *      @vptr: velocity
2728 *
2729 *      Print the per device data as the kernel driver discovers
2730 *      Velocity hardware
2731 */
2732static void velocity_print_info(struct velocity_info *vptr)
2733{
2734        struct net_device *dev = vptr->netdev;
2735
2736        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2737        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2738                dev->name, dev->dev_addr);
2739}
2740
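/**
 *      velocity_get_link       -       link status reporting
 *      @dev: network device
 *
 *      Report the link state from the PHYSR0 register: 1 if a link
 *      is established, 0 otherwise.
 */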
2741static u32 velocity_get_link(struct net_device *dev)
2742{
2743        struct velocity_info *vptr = netdev_priv(dev);
2744        struct mac_regs __iomem *regs = vptr->mac_regs;
2745        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2746}
2747
2748/**
2749 *      velocity_probe - set up discovered velocity device
2750 *      @dev: device being probed
2751 *      @irq: interrupt line assigned to the adapter
2752 *      @info: board type table entry that matched
 *      @bustype: bus that device is connected to
2753 *
2754 *      Configure a discovered adapter from scratch. Return a negative
2755 *      errno error code on failure paths.
2756 */
2757static int velocity_probe(struct device *dev, int irq,
2758                           const struct velocity_info_tbl *info,
2759                           enum velocity_bus_type bustype)
2760{
2761        static int first = 1;
2762        struct net_device *netdev;
2763        int i;
2764        const char *drv_string;
2765        struct velocity_info *vptr;
2766        struct mac_regs __iomem *regs;
2767        int ret = -ENOMEM;
2768
2769        /* FIXME: this driver, like almost all other ethernet drivers,
2770         * can support more than MAX_UNITS.
2771         */
2772        if (velocity_nics >= MAX_UNITS) {
2773                dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2774                return -ENODEV;
2775        }
2776
2777        netdev = alloc_etherdev(sizeof(struct velocity_info));
2778        if (!netdev)
2779                goto out;
2780
2781        /* Chain it all together */
2782
2783        SET_NETDEV_DEV(netdev, dev);
2784        vptr = netdev_priv(netdev);
2785
2786        if (first) {
2787                printk(KERN_INFO "%s Ver. %s\n",
2788                        VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2789                printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2790                printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2791                first = 0;
2792        }
2793
2794        netdev->irq = irq;
2795        vptr->netdev = netdev;
2796        vptr->dev = dev;
2797
2798        velocity_init_info(vptr, info);
2799
2800        if (bustype == BUS_PCI) {
2801                vptr->pdev = to_pci_dev(dev);
2802
2803                ret = velocity_get_pci_info(vptr);
2804                if (ret < 0)
2805                        goto err_free_dev;
2806        } else {
2807                vptr->pdev = NULL;
2808                ret = velocity_get_platform_info(vptr);
2809                if (ret < 0)
2810                        goto err_free_dev;
2811        }
2812
2813        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2814        if (regs == NULL) {
2815                ret = -EIO;
2816                goto err_free_dev;
2817        }
2818
2819        vptr->mac_regs = regs;
2820        vptr->rev_id = readb(&regs->rev_id);
2821
2822        mac_wol_reset(regs);
2823
2824        for (i = 0; i < 6; i++)
2825                netdev->dev_addr[i] = readb(&regs->PAR[i]);
2826
2827
2828        drv_string = dev_driver_string(dev);
2829
2830        velocity_get_options(&vptr->options, velocity_nics, drv_string);
2831
2832        /*
2833         *      Mask out the options that cannot be set on this chip
2834         */
2835
2836        vptr->options.flags &= info->flags;
2837
2838        /*
2839         *      Enable the chip specific capabilities
2840         */
2841
2842        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2843
2844        vptr->wol_opts = vptr->options.wol_opts;
2845        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2846
2847        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2848
2849        netdev->netdev_ops = &velocity_netdev_ops;
2850        netdev->ethtool_ops = &velocity_ethtool_ops;
2851        netif_napi_add(netdev, &vptr->napi, velocity_poll,
2852                                                        VELOCITY_NAPI_WEIGHT);
2853
2854        netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2855                           NETIF_F_HW_VLAN_CTAG_TX;
2856        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2857                        NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2858                        NETIF_F_IP_CSUM;
2859
2860        /* MTU range: 64 - 9000 */
2861        netdev->min_mtu = VELOCITY_MIN_MTU;
2862        netdev->max_mtu = VELOCITY_MAX_MTU;
2863
2864        ret = register_netdev(netdev);
2865        if (ret < 0)
2866                goto err_iounmap;
2867
2868        if (!velocity_get_link(netdev)) {
2869                netif_carrier_off(netdev);
2870                vptr->mii_status |= VELOCITY_LINK_FAIL;
2871        }
2872
2873        velocity_print_info(vptr);
2874        dev_set_drvdata(vptr->dev, netdev);
2875
2876        /* and leave the chip powered down */
2877
2878        velocity_set_power_state(vptr, PCI_D3hot);
2879        velocity_nics++;
2880out:
2881        return ret;
2882
2883err_iounmap:
2884        netif_napi_del(&vptr->napi);
2885        iounmap(regs);
2886err_free_dev:
2887        free_netdev(netdev);
2888        goto out;
2889}
2890
2891/**
2892 *      velocity_remove - device unplug
2893 *      @dev: device being removed
2894 *
2895 *      Device unload callback. Called on an unplug or on module
2896 *      unload for each active device that is present. Disconnects
2897 *      the device from the network layer and frees all the resources
2898 */
2899static int velocity_remove(struct device *dev)
2900{
2901        struct net_device *netdev = dev_get_drvdata(dev);
2902        struct velocity_info *vptr = netdev_priv(netdev);
2903
2904        unregister_netdev(netdev);
2905        netif_napi_del(&vptr->napi);
2906        iounmap(vptr->mac_regs);
2907        free_netdev(netdev);
2908        velocity_nics--;
2909
2910        return 0;
2911}
2912
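/**
 *      velocity_pci_probe      -       PCI device discovery
 *      @pdev: PCI device that matched
 *      @ent: entry in the velocity PCI device table
 *
 *      Enable the PCI device and claim its regions, then hand over
 *      to the bus independent probe. Unwind on failure.
 */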
2913static int velocity_pci_probe(struct pci_dev *pdev,
2914                               const struct pci_device_id *ent)
2915{
2916        const struct velocity_info_tbl *info =
2917                                        &chip_info_table[ent->driver_data];
2918        int ret;
2919
2920        ret = pci_enable_device(pdev);
2921        if (ret < 0)
2922                return ret;
2923
2924        ret = pci_request_regions(pdev, VELOCITY_NAME);
2925        if (ret < 0) {
2926                dev_err(&pdev->dev, "No PCI resources.\n");
2927                goto fail1;
2928        }
2929
2930        ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2931        if (ret == 0)
2932                return 0;
2933
2934        pci_release_regions(pdev);
2935fail1:
2936        pci_disable_device(pdev);
2937        return ret;
2938}
2939
2940static void velocity_pci_remove(struct pci_dev *pdev)
2941{
2942        velocity_remove(&pdev->dev);
2943
2944        pci_release_regions(pdev);
2945        pci_disable_device(pdev);
2946}
2947
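/**
 *      velocity_platform_probe -       platform device discovery
 *      @pdev: platform device to probe
 *
 *      Match the device tree node against the driver table, map the
 *      interrupt line and hand over to the bus independent probe.
 */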
2948static int velocity_platform_probe(struct platform_device *pdev)
2949{
2950        const struct of_device_id *of_id;
2951        const struct velocity_info_tbl *info;
2952        int irq;
2953
2954        of_id = of_match_device(velocity_of_ids, &pdev->dev);
2955        if (!of_id)
2956                return -EINVAL;
2957        info = of_id->data;
2958
2959        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2960        if (!irq)
2961                return -EINVAL;
2962
2963        return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2964}
2965
2966static int velocity_platform_remove(struct platform_device *pdev)
2967{
2968        velocity_remove(&pdev->dev);
2969
2970        return 0;
2971}
2972
2973#ifdef CONFIG_PM_SLEEP
2974/**
2975 *      wol_calc_crc            -       WOL CRC
2976 *      @size: number of mask bytes (each mask byte covers 8 pattern bytes)
2977 *      @pattern: data pattern
 *      @mask_pattern: mask
2978 *
2979 *      Compute the wake on lan crc hashes for the packet header
2980 *      we are interested in.
2981 */
2982static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2983{
2984        u16 crc = 0xFFFF;
2985        u8 mask;
2986        int i, j;
2987
2988        for (i = 0; i < size; i++) {
2989                mask = mask_pattern[i];
2990
2991                /* Skip this loop if the mask equals zero */
2992                if (mask == 0x00)
2993                        continue;
2994
2995                for (j = 0; j < 8; j++) {
2996                        if ((mask & 0x01) == 0) {
2997                                mask >>= 1;
2998                                continue;
2999                        }
3000                        mask >>= 1;
3001                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
3002                }
3003        }
3004        /*      Finally, invert the result once to get the correct data */
3005        crc = ~crc;
3006        return bitrev32(crc) >> 16;
3007}
3008
3009/**
3010 *      velocity_set_wol        -       set up for wake on lan
3011 *      @vptr: velocity to set WOL status on
3012 *
3013 *      Set a card up for wake on lan either by unicast or by
3014 *      ARP packet.
3015 *
3016 *      FIXME: check static buffer is safe here
3017 */
3018static int velocity_set_wol(struct velocity_info *vptr)
3019{
3020        struct mac_regs __iomem *regs = vptr->mac_regs;
3021        enum speed_opt spd_dpx = vptr->options.spd_dpx;
3022        static u8 buf[256];
3023        int i;
3024
3025        static u32 mask_pattern[2][4] = {
3026                {0x00203000, 0x000003C0, 0x00000000, 0x00000000}, /* ARP */
3027                {0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}  /* Magic Packet */
3028        };
3029
3030        writew(0xFFFF, &regs->WOLCRClr);
3031        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3032        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3033
3034        /*
3035           if (vptr->wol_opts & VELOCITY_WOL_PHY)
3036           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3037         */
3038
3039        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3040                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3041
3042        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3043                struct arp_packet *arp = (struct arp_packet *) buf;
3044                u16 crc;
3045                memset(buf, 0, sizeof(struct arp_packet) + 7);
3046
3047                for (i = 0; i < 4; i++)
3048                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3049
3050                arp->type = htons(ETH_P_ARP);
3051                arp->ar_op = htons(1);
3052
3053                memcpy(arp->ar_tip, vptr->ip_addr, 4);
3054
                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
                                   (u8 *)&mask_pattern[0][0]);
3057
3058                writew(crc, &regs->PatternCRC[0]);
3059                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3060        }
3061
3062        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3063        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3064
3065        writew(0x0FFF, &regs->WOLSRClr);
3066
3067        if (spd_dpx == SPD_DPX_1000_FULL)
3068                goto mac_done;
3069
3070        if (spd_dpx != SPD_DPX_AUTO)
3071                goto advertise_done;
3072
3073        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3074                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3075                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3076
3077                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3078        }
3079
3080        if (vptr->mii_status & VELOCITY_SPEED_1000)
3081                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3082
3083advertise_done:
3084        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3085
3086        {
3087                u8 GCR;
3088                GCR = readb(&regs->CHIPGCR);
3089                GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3090                writeb(GCR, &regs->CHIPGCR);
3091        }
3092
3093mac_done:
3094        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3095        /* Turn on SWPTAG just before entering power mode */
3096        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3097        /* Go to bed ..... */
3098        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3099
3100        return 0;
3101}
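
/*
 * Editor's note on the ARP byte mask above, assuming bit n of ByteMask
 * word w selects packet byte (w * 32 + n): word 0 = 0x00203000 selects
 * bytes 12, 13 and 21 (the Ethernet type and the low byte of ar_op set
 * above), and word 1 = 0x000003C0 selects bytes 38-41 (ar_tip, the
 * target IP copied from vptr->ip_addr).  Only these bytes contribute to
 * the PatternCRC that the hardware compares against incoming frames.
 */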
3102
3103/**
3104 *      velocity_save_context   -       save registers
3105 *      @vptr: velocity
3106 *      @context: buffer for stored context
3107 *
3108 *      Retrieve the current configuration from the velocity hardware
3109 *      and stash it in the context structure, for use by the context
3110 *      restore functions. This allows us to save things we need across
3111 *      power down states
3112 */
3113static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3114{
3115        struct mac_regs __iomem *regs = vptr->mac_regs;
3116        u16 i;
3117        u8 __iomem *ptr = (u8 __iomem *)regs;
3118
3119        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3120                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3121
3122        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3123                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3124
3125        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3126                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3127
3128}
3129
3130static int velocity_suspend(struct device *dev)
3131{
3132        struct net_device *netdev = dev_get_drvdata(dev);
3133        struct velocity_info *vptr = netdev_priv(netdev);
3134        unsigned long flags;
3135
3136        if (!netif_running(vptr->netdev))
3137                return 0;
3138
3139        netif_device_detach(vptr->netdev);
3140
3141        spin_lock_irqsave(&vptr->lock, flags);
3142        if (vptr->pdev)
3143                pci_save_state(vptr->pdev);
3144
3145        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3146                velocity_get_ip(vptr);
3147                velocity_save_context(vptr, &vptr->context);
3148                velocity_shutdown(vptr);
3149                velocity_set_wol(vptr);
3150                if (vptr->pdev)
3151                        pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3152                velocity_set_power_state(vptr, PCI_D3hot);
3153        } else {
3154                velocity_save_context(vptr, &vptr->context);
3155                velocity_shutdown(vptr);
3156                if (vptr->pdev)
3157                        pci_disable_device(vptr->pdev);
3158                velocity_set_power_state(vptr, PCI_D3hot);
3159        }
3160
3161        spin_unlock_irqrestore(&vptr->lock, flags);
3162        return 0;
3163}
3164
3165/**
3166 *      velocity_restore_context        -       restore registers
3167 *      @vptr: velocity
3168 *      @context: buffer for stored context
3169 *
3170 *      Reload the register configuration from the velocity context
3171 *      created by velocity_save_context.
3172 */
3173static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3174{
3175        struct mac_regs __iomem *regs = vptr->mac_regs;
3176        int i;
3177        u8 __iomem *ptr = (u8 __iomem *)regs;
3178
3179        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3180                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3181
3182        /* Just skip cr0 */
3183        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3184                /* Clear */
3185                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3186                /* Set */
3187                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3188        }
3189
3190        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3191                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3192
3193        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3194                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3195
3196        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3197                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3198}
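
/*
 * Editor's note: the CR1-CR3 restore above relies on the hardware's
 * paired SET/CLR registers: writing the complement of the saved value
 * to the CLR register (at the SET address + 4) clears every bit that
 * should be zero, then writing the saved value to the SET register
 * sets every bit that should be one.  For a saved value of 0x5A:
 *
 *      writeb(~0x5A, ptr + i + 4);     // clear bits 0, 2, 5 and 7
 *      writeb(0x5A, ptr + i);          // set bits 1, 3, 4 and 6
 *
 * leaving the register at exactly 0x5A regardless of its prior state.
 */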
3199
3200static int velocity_resume(struct device *dev)
3201{
3202        struct net_device *netdev = dev_get_drvdata(dev);
3203        struct velocity_info *vptr = netdev_priv(netdev);
3204        unsigned long flags;
3205        int i;
3206
3207        if (!netif_running(vptr->netdev))
3208                return 0;
3209
3210        velocity_set_power_state(vptr, PCI_D0);
3211
3212        if (vptr->pdev) {
3213                pci_enable_wake(vptr->pdev, PCI_D0, 0);
3214                pci_restore_state(vptr->pdev);
3215        }
3216
3217        mac_wol_reset(vptr->mac_regs);
3218
3219        spin_lock_irqsave(&vptr->lock, flags);
3220        velocity_restore_context(vptr, &vptr->context);
3221        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3222        mac_disable_int(vptr->mac_regs);
3223
3224        velocity_tx_srv(vptr);
3225
3226        for (i = 0; i < vptr->tx.numq; i++) {
3227                if (vptr->tx.used[i])
3228                        mac_tx_queue_wake(vptr->mac_regs, i);
3229        }
3230
3231        mac_enable_int(vptr->mac_regs);
3232        spin_unlock_irqrestore(&vptr->lock, flags);
3233        netif_device_attach(vptr->netdev);
3234
3235        return 0;
3236}
3237#endif  /* CONFIG_PM_SLEEP */
3238
3239static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3240
/*
 *      Definition for our device driver. The PCI layer interface
 *      uses this to handle all our card discovery and plugging.
 */
3245static struct pci_driver velocity_pci_driver = {
3246        .name           = VELOCITY_NAME,
3247        .id_table       = velocity_pci_id_table,
3248        .probe          = velocity_pci_probe,
3249        .remove         = velocity_pci_remove,
3250        .driver = {
3251                .pm = &velocity_pm_ops,
3252        },
3253};
3254
3255static struct platform_driver velocity_platform_driver = {
3256        .probe          = velocity_platform_probe,
3257        .remove         = velocity_platform_remove,
3258        .driver = {
3259                .name = "via-velocity",
3260                .of_match_table = velocity_of_ids,
3261                .pm = &velocity_pm_ops,
3262        },
3263};
3264
3265/**
3266 *      velocity_ethtool_up     -       pre hook for ethtool
3267 *      @dev: network device
3268 *
3269 *      Called before an ethtool operation. We need to make sure the
3270 *      chip is out of D3 state before we poke at it.
3271 */
3272static int velocity_ethtool_up(struct net_device *dev)
3273{
3274        struct velocity_info *vptr = netdev_priv(dev);
3275        if (!netif_running(dev))
3276                velocity_set_power_state(vptr, PCI_D0);
3277        return 0;
3278}
3279
3280/**
3281 *      velocity_ethtool_down   -       post hook for ethtool
3282 *      @dev: network device
3283 *
3284 *      Called after an ethtool operation. Restore the chip back to D3
3285 *      state if it isn't running.
3286 */
3287static void velocity_ethtool_down(struct net_device *dev)
3288{
3289        struct velocity_info *vptr = netdev_priv(dev);
3290        if (!netif_running(dev))
3291                velocity_set_power_state(vptr, PCI_D3hot);
3292}
3293
3294static int velocity_get_link_ksettings(struct net_device *dev,
3295                                       struct ethtool_link_ksettings *cmd)
3296{
3297        struct velocity_info *vptr = netdev_priv(dev);
3298        struct mac_regs __iomem *regs = vptr->mac_regs;
3299        u32 status;
3300        u32 supported, advertising;
3301
3302        status = check_connection_type(vptr->mac_regs);
3303
3304        supported = SUPPORTED_TP |
3305                        SUPPORTED_Autoneg |
3306                        SUPPORTED_10baseT_Half |
3307                        SUPPORTED_10baseT_Full |
3308                        SUPPORTED_100baseT_Half |
3309                        SUPPORTED_100baseT_Full |
3310                        SUPPORTED_1000baseT_Half |
3311                        SUPPORTED_1000baseT_Full;
3312
3313        advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3314        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3315                advertising |=
3316                        ADVERTISED_10baseT_Half |
3317                        ADVERTISED_10baseT_Full |
3318                        ADVERTISED_100baseT_Half |
3319                        ADVERTISED_100baseT_Full |
3320                        ADVERTISED_1000baseT_Half |
3321                        ADVERTISED_1000baseT_Full;
3322        } else {
3323                switch (vptr->options.spd_dpx) {
3324                case SPD_DPX_1000_FULL:
3325                        advertising |= ADVERTISED_1000baseT_Full;
3326                        break;
3327                case SPD_DPX_100_HALF:
3328                        advertising |= ADVERTISED_100baseT_Half;
3329                        break;
3330                case SPD_DPX_100_FULL:
3331                        advertising |= ADVERTISED_100baseT_Full;
3332                        break;
3333                case SPD_DPX_10_HALF:
3334                        advertising |= ADVERTISED_10baseT_Half;
3335                        break;
3336                case SPD_DPX_10_FULL:
3337                        advertising |= ADVERTISED_10baseT_Full;
3338                        break;
3339                default:
3340                        break;
3341                }
3342        }
3343
3344        if (status & VELOCITY_SPEED_1000)
3345                cmd->base.speed = SPEED_1000;
3346        else if (status & VELOCITY_SPEED_100)
3347                cmd->base.speed = SPEED_100;
3348        else
3349                cmd->base.speed = SPEED_10;
3350
3351        cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3352                AUTONEG_ENABLE : AUTONEG_DISABLE;
3353        cmd->base.port = PORT_TP;
3354        cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3355
3356        if (status & VELOCITY_DUPLEX_FULL)
3357                cmd->base.duplex = DUPLEX_FULL;
3358        else
3359                cmd->base.duplex = DUPLEX_HALF;
3360
3361        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3362                                                supported);
3363        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3364                                                advertising);
3365
3366        return 0;
3367}
3368
3369static int velocity_set_link_ksettings(struct net_device *dev,
3370                                       const struct ethtool_link_ksettings *cmd)
3371{
3372        struct velocity_info *vptr = netdev_priv(dev);
3373        u32 speed = cmd->base.speed;
3374        u32 curr_status;
3375        u32 new_status = 0;
3376        int ret = 0;
3377
3378        curr_status = check_connection_type(vptr->mac_regs);
3379        curr_status &= (~VELOCITY_LINK_FAIL);
3380
3381        new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3382        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3383        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3384        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3385        new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3386                       VELOCITY_DUPLEX_FULL : 0);
3387
3388        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3389            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3390                ret = -EINVAL;
3391        } else {
3392                enum speed_opt spd_dpx;
3393
                if (new_status & VELOCITY_AUTONEG_ENABLE) {
                        spd_dpx = SPD_DPX_AUTO;
                } else if ((new_status & VELOCITY_SPEED_1000) &&
                           (new_status & VELOCITY_DUPLEX_FULL)) {
                        spd_dpx = SPD_DPX_1000_FULL;
                } else if (new_status & VELOCITY_SPEED_100) {
                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
                } else if (new_status & VELOCITY_SPEED_10) {
                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
                } else {
                        return -EOPNOTSUPP;
                }
3407
3408                vptr->options.spd_dpx = spd_dpx;
3409
3410                velocity_set_media_mode(vptr, new_status);
3411        }
3412
3413        return ret;
3414}
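
/*
 * Example (editor's sketch): forcing 100 Mbit full duplex from
 * userspace goes through the handler above:
 *
 *      # ethtool -s eth0 autoneg off speed 100 duplex full
 *
 * which maps to SPD_DPX_100_FULL.  Requesting autoneg together with a
 * speed/duplex that differs from the current link is rejected with
 * -EINVAL, and unknown speeds fall out as -EOPNOTSUPP.
 */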
3415
3416static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3417{
3418        struct velocity_info *vptr = netdev_priv(dev);
3419
3420        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3421        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3422        if (vptr->pdev)
3423                strlcpy(info->bus_info, pci_name(vptr->pdev),
3424                                                sizeof(info->bus_info));
3425        else
3426                strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3427}
3428
3429static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3430{
3431        struct velocity_info *vptr = netdev_priv(dev);
3432        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3433        wol->wolopts |= WAKE_MAGIC;
3434        /*
3435           if (vptr->wol_opts & VELOCITY_WOL_PHY)
3436                   wol.wolopts|=WAKE_PHY;
3437                         */
3438        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3439                wol->wolopts |= WAKE_UCAST;
3440        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3441                wol->wolopts |= WAKE_ARP;
3442        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3443}
3444
3445static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3446{
3447        struct velocity_info *vptr = netdev_priv(dev);
3448
3449        if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3450                return -EFAULT;
3451        vptr->wol_opts = VELOCITY_WOL_MAGIC;
3452
3453        /*
3454           if (wol.wolopts & WAKE_PHY) {
3455           vptr->wol_opts|=VELOCITY_WOL_PHY;
3456           vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3457           }
3458         */
3459
3460        if (wol->wolopts & WAKE_MAGIC) {
3461                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3462                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3463        }
3464        if (wol->wolopts & WAKE_UCAST) {
3465                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3466                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3467        }
3468        if (wol->wolopts & WAKE_ARP) {
3469                vptr->wol_opts |= VELOCITY_WOL_ARP;
3470                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3471        }
3472        memcpy(vptr->wol_passwd, wol->sopass, 6);
3473        return 0;
3474}
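
/*
 * Example (editor's sketch): enabling wake-up from userspace goes
 * through the handler above.  Note that magic packet wake is always
 * re-armed, since wol_opts is reset to VELOCITY_WOL_MAGIC first:
 *
 *      # ethtool -s eth0 wol g         # magic packet only
 *      # ethtool -s eth0 wol ug        # unicast + magic packet
 *      # ethtool -s eth0 wol ag        # ARP + magic packet
 */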
3475
3476static u32 velocity_get_msglevel(struct net_device *dev)
3477{
3478        return msglevel;
3479}
3480
3481static void velocity_set_msglevel(struct net_device *dev, u32 value)
3482{
        msglevel = value;
3484}
3485
3486static int get_pending_timer_val(int val)
3487{
3488        int mult_bits = val >> 6;
3489        int mult = 1;
3490
        switch (mult_bits) {
3493        case 1:
3494                mult = 4; break;
3495        case 2:
3496                mult = 16; break;
3497        case 3:
3498                mult = 64; break;
3499        case 0:
3500        default:
3501                break;
3502        }
3503
3504        return (val & 0x3f) * mult;
3505}
3506
3507static void set_pending_timer_val(int *val, u32 us)
3508{
3509        u8 mult = 0;
3510        u8 shift = 0;
3511
3512        if (us >= 0x3f) {
3513                mult = 1; /* mult with 4 */
3514                shift = 2;
3515        }
3516        if (us >= 0x3f * 4) {
3517                mult = 2; /* mult with 16 */
3518                shift = 4;
3519        }
3520        if (us >= 0x3f * 16) {
3521                mult = 3; /* mult with 64 */
3522                shift = 6;
3523        }
3524
3525        *val = (mult << 6) | ((us >> shift) & 0x3f);
3526}
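
/*
 * Worked example (editor's addition): a request for 100 usec exceeds
 * 0x3f but not 0x3f * 4, so set_pending_timer_val() picks multiplier
 * code 1 (x4) and stores (1 << 6) | (100 >> 2) = 0x59.  Decoding it,
 * get_pending_timer_val() returns (0x59 & 0x3f) * 4 = 100.  Requested
 * values are quantised down to the selected multiplier step, so e.g.
 * 103 usec also encodes to 0x59 and reads back as 100.
 */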
3527
3528
3529static int velocity_get_coalesce(struct net_device *dev,
3530                struct ethtool_coalesce *ecmd)
3531{
3532        struct velocity_info *vptr = netdev_priv(dev);
3533
3534        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3535        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3536
3537        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3538        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3539
3540        return 0;
3541}
3542
3543static int velocity_set_coalesce(struct net_device *dev,
3544                struct ethtool_coalesce *ecmd)
3545{
3546        struct velocity_info *vptr = netdev_priv(dev);
3547        int max_us = 0x3f * 64;
3548        unsigned long flags;
3549
        /* 6 bits of counter, 2 bits of multiplier: at most 0x3f * 64 usec */
3551        if (ecmd->tx_coalesce_usecs > max_us)
3552                return -EINVAL;
3553        if (ecmd->rx_coalesce_usecs > max_us)
3554                return -EINVAL;
3555
3556        if (ecmd->tx_max_coalesced_frames > 0xff)
3557                return -EINVAL;
3558        if (ecmd->rx_max_coalesced_frames > 0xff)
3559                return -EINVAL;
3560
3561        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3562        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3563
3564        set_pending_timer_val(&vptr->options.rxqueue_timer,
3565                        ecmd->rx_coalesce_usecs);
3566        set_pending_timer_val(&vptr->options.txqueue_timer,
3567                        ecmd->tx_coalesce_usecs);
3568
3569        /* Setup the interrupt suppression and queue timers */
3570        spin_lock_irqsave(&vptr->lock, flags);
3571        mac_disable_int(vptr->mac_regs);
3572        setup_adaptive_interrupts(vptr);
3573        setup_queue_timers(vptr);
3574
3575        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3576        mac_clear_isr(vptr->mac_regs);
3577        mac_enable_int(vptr->mac_regs);
3578        spin_unlock_irqrestore(&vptr->lock, flags);
3579
3580        return 0;
3581}
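
/*
 * Example (editor's sketch): tuning interrupt mitigation from
 * userspace, within the limits checked above (usecs up to 0x3f * 64 =
 * 4032, frames up to 255):
 *
 *      # ethtool -C eth0 rx-usecs 100 rx-frames 16 tx-usecs 200 tx-frames 32
 */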
3582
3583static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3584        "rx_all",
3585        "rx_ok",
3586        "tx_ok",
3587        "rx_error",
3588        "rx_runt_ok",
3589        "rx_runt_err",
3590        "rx_64",
3591        "tx_64",
3592        "rx_65_to_127",
3593        "tx_65_to_127",
3594        "rx_128_to_255",
3595        "tx_128_to_255",
3596        "rx_256_to_511",
3597        "tx_256_to_511",
3598        "rx_512_to_1023",
3599        "tx_512_to_1023",
3600        "rx_1024_to_1518",
3601        "tx_1024_to_1518",
3602        "tx_ether_collisions",
3603        "rx_crc_errors",
3604        "rx_jumbo",
3605        "tx_jumbo",
3606        "rx_mac_control_frames",
3607        "tx_mac_control_frames",
3608        "rx_frame_alignement_errors",
3609        "rx_long_ok",
3610        "rx_long_err",
3611        "tx_sqe_errors",
3612        "rx_no_buf",
3613        "rx_symbol_errors",
3614        "in_range_length_errors",
3615        "late_collisions"
3616};
3617
3618static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3619{
3620        switch (sset) {
3621        case ETH_SS_STATS:
3622                memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3623                break;
3624        }
3625}
3626
3627static int velocity_get_sset_count(struct net_device *dev, int sset)
3628{
3629        switch (sset) {
3630        case ETH_SS_STATS:
3631                return ARRAY_SIZE(velocity_gstrings);
3632        default:
3633                return -EOPNOTSUPP;
3634        }
3635}
3636
3637static void velocity_get_ethtool_stats(struct net_device *dev,
3638                                       struct ethtool_stats *stats, u64 *data)
3639{
3640        if (netif_running(dev)) {
3641                struct velocity_info *vptr = netdev_priv(dev);
3642                u32 *p = vptr->mib_counter;
3643                int i;
3644
3645                spin_lock_irq(&vptr->lock);
3646                velocity_update_hw_mibs(vptr);
3647                spin_unlock_irq(&vptr->lock);
3648
3649                for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3650                        *data++ = *p++;
3651        }
3652}
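
/*
 * Example (editor's sketch): the strings and counters above are what
 * userspace sees from
 *
 *      # ethtool -S eth0
 *
 * The MIB counters are refreshed from the hardware under the lock
 * before being copied out.
 */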
3653
3654static const struct ethtool_ops velocity_ethtool_ops = {
3655        .get_drvinfo            = velocity_get_drvinfo,
3656        .get_wol                = velocity_ethtool_get_wol,
3657        .set_wol                = velocity_ethtool_set_wol,
3658        .get_msglevel           = velocity_get_msglevel,
3659        .set_msglevel           = velocity_set_msglevel,
3660        .get_link               = velocity_get_link,
3661        .get_strings            = velocity_get_strings,
3662        .get_sset_count         = velocity_get_sset_count,
3663        .get_ethtool_stats      = velocity_get_ethtool_stats,
3664        .get_coalesce           = velocity_get_coalesce,
3665        .set_coalesce           = velocity_set_coalesce,
3666        .begin                  = velocity_ethtool_up,
3667        .complete               = velocity_ethtool_down,
3668        .get_link_ksettings     = velocity_get_link_ksettings,
3669        .set_link_ksettings     = velocity_set_link_ksettings,
3670};
3671
3672#if defined(CONFIG_PM) && defined(CONFIG_INET)
3673static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3674{
3675        struct in_ifaddr *ifa = ptr;
3676        struct net_device *dev = ifa->ifa_dev->dev;
3677
3678        if (dev_net(dev) == &init_net &&
3679            dev->netdev_ops == &velocity_netdev_ops)
3680                velocity_get_ip(netdev_priv(dev));
3681
3682        return NOTIFY_DONE;
3683}
3684
3685static struct notifier_block velocity_inetaddr_notifier = {
3686        .notifier_call  = velocity_netdev_event,
3687};
3688
3689static void velocity_register_notifier(void)
3690{
3691        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3692}
3693
3694static void velocity_unregister_notifier(void)
3695{
3696        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3697}
3698
3699#else
3700
3701#define velocity_register_notifier()    do {} while (0)
3702#define velocity_unregister_notifier()  do {} while (0)
3703
3704#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
3705
/**
 *      velocity_init_module    -       load time function
 *
 *      Called when the velocity module is loaded. The PCI and platform
 *      drivers are registered with their respective buses, and each in
 *      turn will call the probe function for every velocity adapter
 *      installed in the system.
 */
3714static int __init velocity_init_module(void)
3715{
3716        int ret_pci, ret_platform;
3717
3718        velocity_register_notifier();
3719
3720        ret_pci = pci_register_driver(&velocity_pci_driver);
3721        ret_platform = platform_driver_register(&velocity_platform_driver);
3722
        /* if both registrations failed, remove the notifier */
3724        if ((ret_pci < 0) && (ret_platform < 0)) {
3725                velocity_unregister_notifier();
3726                return ret_pci;
3727        }
3728
3729        return 0;
3730}
3731
/**
 *      velocity_cleanup_module -       module unload
 *
 *      Called when the velocity module is unloaded. It cleans up the
 *      notifiers and unregisters the PCI and platform driver interfaces
 *      for this hardware. This in turn cleans up all discovered
 *      interfaces before returning from the function.
 */
3740static void __exit velocity_cleanup_module(void)
3741{
3742        velocity_unregister_notifier();
3743
3744        pci_unregister_driver(&velocity_pci_driver);
3745        platform_driver_unregister(&velocity_platform_driver);
3746}
3747
3748module_init(velocity_init_module);
3749module_exit(velocity_cleanup_module);
3750