linux/drivers/net/ethernet/via/via-velocity.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *      rx_copybreak/alignment
 *      More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/inetdevice.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"

enum velocity_bus_type {
        BUS_PCI,
        BUS_PLATFORM,
};

static int velocity_nics;

static void velocity_set_power_state(struct velocity_info *vptr, char state)
{
        void *addr = vptr->mac_regs;

        if (vptr->pdev)
                pci_set_power_state(vptr->pdev, state);
        else
                writeb(state, addr + 0x154);
}

/**
 *      mac_get_cam_mask        -       Read a CAM mask
 *      @regs: register block for this velocity
 *      @mask: buffer to store mask
 *
 *      Fetch the mask bits of the selected CAM and store them into the
 *      provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam_mask        -       Set a CAM mask
 *      @regs: register block for this velocity
 *      @mask: CAM mask to load
 *
 *      Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_set_cam     -       set CAM data
 *      @regs: register block of this velocity
 *      @idx: Cam index
 *      @addr: 2 or 6 bytes of CAM data
 *
 *      Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
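
/*
 * Illustrative sketch (not part of the driver): programming a unicast
 * address into CAM slot 0 and enabling that slot pairs mac_set_cam()
 * with mac_set_cam_mask(), much as velocity_set_multi() below does for
 * multicast entries:
 *
 *      mac_set_cam(regs, 0, dev->dev_addr);
 *      vptr->mCAMmask[0] |= 0x01;      (bit 0 enables CAM slot 0)
 *      mac_set_cam_mask(regs, vptr->mCAMmask);
 */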

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *      mac_wol_reset   -       reset WOL after exiting low power
 *      @regs: register block of this velocity
 *
 *      Called after we drop out of wake on lan mode in order to
 *      reset the Wake on lan features. This function doesn't restore
 *      the rest of the chip state lost across sleep/wakeup.
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);
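
/*
 * For reference, a hypothetical expansion of the macro:
 * VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors")
 * becomes roughly
 *
 *      static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      module_param_array(RxDescriptors, int, NULL, 0);
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * i.e. one per-unit array per option. OPTION_DEFAULT is assumed here to
 * fill the array with -1, which velocity_set_int_opt() below treats as
 * "use the built-in default".
 */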

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
      In some environments, the IP header should be DWORD byte aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode
   5: indicates 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to the force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
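
/*
 * Usage sketch (assuming the usual via-velocity module name): each
 * option is a per-adapter array, so a comma-separated list assigns
 * values unit by unit, e.g.
 *
 *      modprobe via-velocity RxDescriptors=128,64 DMA_length=6 \
 *              speed_duplex=0 rx_copybreak=200
 *
 * Entries left unset keep the defaults documented above.
 */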

/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */

static const struct pci_device_id velocity_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);

/*
 *      Describe the OF device identifiers that we support in this
 *      device driver. Used for devicetree nodes.
 */
static const struct of_device_id velocity_of_ids[] = {
        { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, velocity_of_ids);

/**
 *      get_chip_name   -       identifier to name
 *      @chip_id: chip identifier
 *
 *      Given a chip identifier return a suitable description. Returns
 *      a pointer to a static string valid while the driver is loaded.
 */
static const char *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 *      velocity_set_int_opt    -       parser for integer options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @min: lowest value allowed
 *      @max: highest value allowed
 *      @def: default value
 *      @name: property name
 *
 *      Set an integer property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
                                 char *name)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                          name, min, max);
                *opt = def;
        } else {
                pr_info("set value of parameter %s to %d\n", name, val);
                *opt = val;
        }
}
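
/*
 * Illustrative call with hypothetical values: clamping an out-of-range
 * receive descriptor count,
 *
 *      velocity_set_int_opt(&opts->numrx, 512,
 *                           RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
 *                           "RxDescriptors");
 *
 * logs the valid range (64-255) and falls back to RX_DESC_DEF (64),
 * since 512 > RX_DESC_MAX.
 */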

/**
 *      velocity_set_bool_opt   -       parser for boolean options
 *      @opt: pointer to option value
 *      @val: value the user requested (or -1 for default)
 *      @def: default value (yes/no)
 *      @flag: numeric value to set for true.
 *      @name: property name
 *
 *      Set a boolean property in the module options. This function does
 *      all the verification and checking as well as reporting so that
 *      we don't duplicate code for each option.
 */
static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
                                  char *name)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                          name, 0, 1);
                *opt |= (def ? flag : 0);
        } else {
                pr_info("set parameter %s to %s\n",
                        name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 *      velocity_get_options    -       set options on device
 *      @opts: option structure for the device
 *      @index: index of option to use in module options array
 *
 *      Turn the module and command options into a single structure
 *      for the current device
 */
static void velocity_get_options(struct velocity_opt *opts, int index)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
                             RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
                             "rx_thresh");
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
                             DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
                             "DMA_length");
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
                             RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
                             "RxDescriptors");
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
                             TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
                             "TxDescriptors");

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
                             FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
                             "flow_control");
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
                              IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
                              "IP_byte_align");
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
                             MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
                             "Media link mode");
        velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
                             WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
                             "Wake On Lan options");
        opts->numrx = (opts->numrx & ~3);
}

/**
 *      velocity_init_cam_filter        -       initialise CAM
 *      @vptr: velocity to program
 *
 *      Initialize the content addressable memory used for filters. Load
 *      appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        unsigned int vid, i = 0;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
                mac_set_vlan_cam(regs, i, (u8 *) &vid);
                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                if (++i >= VCAM_SIZE)
                        break;
        }
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
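
/*
 * Sketch of the VCAM mask bookkeeping above: enabling VLAN CAM slot i
 * sets bit (i % 8) of byte (i / 8), so slots 0-7 live in vCAMmask[0],
 * slots 8-15 in vCAMmask[1], and so on. Slot 10, for example:
 *
 *      vptr->vCAMmask[10 / 8] |= 0x1 << (10 % 8);   i.e. vCAMmask[1] |= 0x04
 */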

static int velocity_vlan_rx_add_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static int velocity_vlan_rx_kill_vid(struct net_device *dev,
                                     __be16 proto, u16 vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
        return 0;
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *      velocity_rx_reset       -       handle a receive reset
 *      @vptr: velocity we are resetting
 *
 *      Reset the ownership and status for the receive ring side.
 *      Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         *      Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *      velocity_get_opt_media_mode     -       get media selection
 *      @vptr: velocity adapter
 *
 *      Get the media mode stored in EEPROM or module options and load
 *      mii_status accordingly. The requested link state information
 *      is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        case SPD_DPX_1000_FULL:
                status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 *      safe_disable_mii_autopoll       -       autopoll off
 *      @regs: velocity registers
 *
 *      Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /* turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 *      enable_mii_autopoll     -       turn on autopolling
 *      @regs: velocity registers
 *
 *      Enable the MII link status autopoll feature on the Velocity
 *      hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

}

/**
 *      velocity_mii_read       -       read MII data
 *      @regs: velocity registers
 *      @index: MII register index
 *      @data: buffer for received data
 *
 *      Perform a single read of an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}
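
/*
 * Illustrative use (sketch only): polling the link bit through this
 * helper, equivalent to the MII_REG_BITS_IS_ON(BMSR_LSTATUS, ...)
 * checks used elsewhere in this file:
 *
 *      u16 bmsr;
 *
 *      if (velocity_mii_read(regs, MII_BMSR, &bmsr) == 0 &&
 *          (bmsr & BMSR_LSTATUS))
 *              netdev_info(vptr->netdev, "link up\n");
 */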

/**
 *      mii_check_media_mode    -       check media state
 *      @regs: velocity registers
 *
 *      Check the current MII status and determine the link status
 *      accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_mii_write      -       write MII data
 *      @regs: velocity registers
 *      @mii_addr: MII register index
 *      @data: 16bit data for the MII register
 *
 *      Perform a single write to an MII 16bit register. Returns zero
 *      on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         *      Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}
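
/*
 * The two helpers are typically paired read-modify-write style, as in
 * this sketch mirroring the ANAR handling in velocity_set_media_mode():
 *
 *      u16 anar;
 *
 *      velocity_mii_read(regs, MII_ADVERTISE, &anar);
 *      anar |= ADVERTISE_100FULL;
 *      velocity_mii_write(regs, MII_ADVERTISE, anar);
 */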

/**
 *      set_mii_flow_control    -       flow control setup
 *      @vptr: velocity interface
 *
 *      Set up the flow control on this interface according to
 *      the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
        }
}

/**
 *      mii_set_auto_on         -       autonegotiate on
 *      @vptr: velocity
 *
 *      Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;
        PHYSR0 = readb(&regs->PHYSR0);

        /*
           if (!(PHYSR0 & PHYSR0_LINKGD))
           status|=VELOCITY_LINK_FAIL;
         */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
                velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
                if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
                    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
                        if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 *      velocity_set_media_mode         -       set media mode
 *      @vptr: velocity adapter
 *      @mii_status: old MII link state
 *
 *      Check the media link state and configure the flow control on the
 *      PHY and the velocity hardware accordingly. In particular we need
 *      to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

        /*
         *      If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
                MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in half duplex mode
                 *    and enable it in full duplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);

                if (mii_status & VELOCITY_SPEED_1000)
                        CHIPGCR |= CHIPGCR_FCGMII;
                else
                        CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        netdev_info(vptr->netdev,
                                    "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        netdev_info(vptr->netdev,
                                    "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
                CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
                if ((mii_status & VELOCITY_SPEED_1000) &&
                    (mii_status & VELOCITY_DUPLEX_FULL)) {
                        CTRL1000 |= ADVERTISE_1000FULL;
                }
                velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
                ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
                } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
                                ANAR |= ADVERTISE_10HALF;
                }
                velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 *      velocity_print_link_status      -       link status reporting
 *      @vptr: velocity to report on
 *
 *      Turn the link status of the velocity card into a kernel log
 *      description of the new link state, detailing speed and duplex
 *      status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{
        const char *link;
        const char *speed;
        const char *duplex;

        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                netdev_notice(vptr->netdev, "failed to detect cable link\n");
                return;
        }

        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                link = "auto-negotiation";

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        speed = "1000";
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        speed = "100";
                else
                        speed = "10";

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        duplex = "full";
                else
                        duplex = "half";
        } else {
                link = "forced";

                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        speed = "1000";
                        duplex = "full";
                        break;
                case SPD_DPX_100_HALF:
                        speed = "100";
                        duplex = "half";
                        break;
                case SPD_DPX_100_FULL:
                        speed = "100";
                        duplex = "full";
                        break;
                case SPD_DPX_10_HALF:
                        speed = "10";
                        duplex = "half";
                        break;
                case SPD_DPX_10_FULL:
                        speed = "10";
                        duplex = "full";
                        break;
                default:
                        speed = "unknown";
                        duplex = "unknown";
                        break;
                }
        }
        netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
                      link, speed, duplex);
}

/**
 *      enable_flow_control_ability     -       flow control
 *      @vptr: velocity to configure
 *
 *      Set up flow control according to the flow control options
 *      determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {

        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }

}

/**
 *      velocity_soft_reset     -       soft reset
 *      @vptr: velocity to reset
 *
 *      Kick off a soft reset of the velocity adapter and then poll
 *      until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 *      velocity_set_multi      -       filter list change callback
 *      @dev: network device
 *
 *      Called by the network layer when the filter lists need to change
 *      for a velocity adapter. Reload the CAMs with the new address
 *      filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask);

                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/*
 * MII access, media link mode setting functions
 */

/**
 *      mii_init        -       set up MII
 *      @vptr: velocity adapter
 *      @mii_status: link status
 *
 *      Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_ICPLUS_IP101A:
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
                                                MII_ADVERTISE, vptr->mac_regs);
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
                                                                vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
                                                                vptr->mac_regs);
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on the ECHODIS bit in NWay-forced full mode and
                 *      turn it off in NWay-forced half mode for the
                 *      NWay-forced vs. legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on the ECHODIS bit in NWay-forced full mode and
                 *      turn it off in NWay-forced half mode for the
                 *      NWay-forced vs. legacy-forced issue
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 *      Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 *      Reset to hardware default
                 */
                MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
        if (BMCR & BMCR_ISOLATE) {
                BMCR &= ~BMCR_ISOLATE;
                velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers   -       Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}

/**
 *      velocity_init_registers -       initialise MAC registers
 *      @vptr: velocity to init
 *      @type: type of initialisation (hot or cold)
 *
 *      Initialise the MAC on a reset or on first set up on the
 *      hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        struct net_device *netdev = vptr->netdev;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(netdev);

                /*
                 *      Reset RX to keep the RX pointer on a 4X
                 *      (multiple-of-4) location
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(netdev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                                                        &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 *      Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                if (!vptr->no_eeprom) {
                        mac_eeprom_reload(regs);
                        for (i = 0; i < 6; i++)
                                writeb(netdev->dev_addr[i], regs->PAR + i);
                }

                /*
                 *      clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 *      Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 *      Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(netdev);

                /*
                 *      Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(netdev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(netdev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * RD number must be equal to 4X per hardware spec
         * (programming guide rev 1.20, p.13)
         */
        if (vptr->rx.filled < 4)
                return;

        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}
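
/*
 * Worked example of the 4X rule (sketch): with rx.filled == 7 and
 * rx.dirty == 5, unusable = 7 & 0x0003 = 3, so only 7 & 0xfffc = 4
 * descriptors are returned to the NIC, RBRDU is credited with 4, and
 * the 3 leftovers stay in rx.filled for a later pass.
 */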
1470
1471/**
1472 *      velocity_init_dma_rings -       set up DMA rings
1473 *      @vptr: Velocity to set up
1474 *
1475 *      Allocate PCI mapped DMA rings for the receive and transmit layer
1476 *      to use.
1477 */
1478static int velocity_init_dma_rings(struct velocity_info *vptr)
1479{
1480        struct velocity_opt *opt = &vptr->options;
1481        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1482        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1483        dma_addr_t pool_dma;
1484        void *pool;
1485        unsigned int i;
1486
1487        /*
1488         * Allocate all RD/TD rings a single pool.
1489         *
1490         * dma_alloc_coherent() fulfills the requirement for 64 bytes
1491         * alignment
1492         */
1493        pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1494                                    rx_ring_size, &pool_dma, GFP_ATOMIC);
1495        if (!pool) {
1496                dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1497                        vptr->netdev->name);
1498                return -ENOMEM;
1499        }
1500
1501        vptr->rx.ring = pool;
1502        vptr->rx.pool_dma = pool_dma;
1503
1504        pool += rx_ring_size;
1505        pool_dma += rx_ring_size;
1506
1507        for (i = 0; i < vptr->tx.numq; i++) {
1508                vptr->tx.rings[i] = pool;
1509                vptr->tx.pool_dma[i] = pool_dma;
1510                pool += tx_ring_size;
1511                pool_dma += tx_ring_size;
1512        }
1513
1514        return 0;
1515}
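
/*
 * Layout of the single coherent allocation made above (sketch):
 *
 *      pool_dma -> +---------------------------------+
 *                  | numrx * sizeof(struct rx_desc)  |  RX ring
 *                  +---------------------------------+
 *                  | numtx * sizeof(struct tx_desc)  |  TX ring, queue 0
 *                  +---------------------------------+
 *                  | ... one block per TX queue ...  |
 *                  +---------------------------------+
 *
 * velocity_free_dma_rings() below recomputes the same total size from
 * the options, so the two functions must stay in sync.
 */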
1516
1517static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1518{
1519        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1520}
1521
1522/**
1523 *      velocity_alloc_rx_buf   -       allocate aligned receive buffer
1524 *      @vptr: velocity
1525 *      @idx: ring index
1526 *
1527 *      Allocate a new full sized buffer for the reception of a frame and
1528 *      map it into PCI space for the hardware to use. The hardware
1529 *      requires *64* byte alignment of the buffer which makes life
1530 *      less fun than would be ideal.
1531 */
1532static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1533{
1534        struct rx_desc *rd = &(vptr->rx.ring[idx]);
1535        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1536
1537        rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1538        if (rd_info->skb == NULL)
1539                return -ENOMEM;
1540
        /*
         *      Do the gymnastics to get the buffer head for data at
         *      64-byte alignment.
         */
        skb_reserve(rd_info->skb,
                        64 - ((unsigned long) rd_info->skb->data & 63));
        rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
                                        vptr->rx.buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(vptr->dev, rd_info->skb_dma)) {
                dev_kfree_skb(rd_info->skb);
                rd_info->skb = NULL;
                return -ENOMEM;
        }
1549
1550        /*
1551         *      Fill in the descriptor to match
1552         */
1553
        *((u32 *)&rd->rdesc0) = 0;
1555        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1556        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1557        rd->pa_high = 0;
1558        return 0;
1559}
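
/*
 * Alignment example (illustrative): if netdev_alloc_skb() hands back
 * data at 0x1236, then 0x1236 & 63 = 0x36 and skb_reserve() advances
 * the head by 64 - 0x36 = 0x0a bytes, so skb->data lands on the
 * 64-byte boundary 0x1240. A buffer that is already aligned is pushed
 * forward a full 64 bytes, which the extra 64 bytes of headroom
 * allocated above accounts for.
 */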
1560
1561
1562static int velocity_rx_refill(struct velocity_info *vptr)
1563{
1564        int dirty = vptr->rx.dirty, done = 0;
1565
1566        do {
1567                struct rx_desc *rd = vptr->rx.ring + dirty;
1568
1569                /* Fine for an all zero Rx desc at init time as well */
1570                if (rd->rdesc0.len & OWNED_BY_NIC)
1571                        break;
1572
1573                if (!vptr->rx.info[dirty].skb) {
1574                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1575                                break;
1576                }
1577                done++;
1578                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1579        } while (dirty != vptr->rx.curr);
1580
1581        if (done) {
1582                vptr->rx.dirty = dirty;
1583                vptr->rx.filled += done;
1584        }
1585
1586        return done;
1587}
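
/*
 * Cursor roles in the RX ring (summary): rx.curr is the next slot the
 * receive service routine will inspect, rx.dirty is the next slot to
 * refill, and rx.filled counts refilled slots that have not yet been
 * handed back to the chip by velocity_give_many_rx_descs().
 */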
1588
1589/**
1590 *      velocity_free_rd_ring   -       free receive ring
1591 *      @vptr: velocity to clean up
1592 *
1593 *      Free the receive buffers for each ring slot and any
1594 *      attached socket buffers that need to go away.
1595 */
1596static void velocity_free_rd_ring(struct velocity_info *vptr)
1597{
1598        int i;
1599
1600        if (vptr->rx.info == NULL)
1601                return;
1602
1603        for (i = 0; i < vptr->options.numrx; i++) {
1604                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1605                struct rx_desc *rd = vptr->rx.ring + i;
1606
1607                memset(rd, 0, sizeof(*rd));
1608
1609                if (!rd_info->skb)
1610                        continue;
1611                dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1612                                 DMA_FROM_DEVICE);
1613                rd_info->skb_dma = 0;
1614
1615                dev_kfree_skb(rd_info->skb);
1616                rd_info->skb = NULL;
1617        }
1618
1619        kfree(vptr->rx.info);
1620        vptr->rx.info = NULL;
1621}
1622
1623/**
1624 *      velocity_init_rd_ring   -       set up receive ring
1625 *      @vptr: velocity to configure
1626 *
1627 *      Allocate and set up the receive buffers for each ring slot and
1628 *      assign them to the network adapter.
1629 */
1630static int velocity_init_rd_ring(struct velocity_info *vptr)
1631{
1632        int ret = -ENOMEM;
1633
1634        vptr->rx.info = kcalloc(vptr->options.numrx,
1635                                sizeof(struct velocity_rd_info), GFP_KERNEL);
1636        if (!vptr->rx.info)
1637                goto out;
1638
1639        velocity_init_rx_ring_indexes(vptr);
1640
1641        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1642                netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
1643                velocity_free_rd_ring(vptr);
1644                goto out;
1645        }
1646
1647        ret = 0;
1648out:
1649        return ret;
1650}
1651
1652/**
1653 *      velocity_init_td_ring   -       set up transmit ring
1654 *      @vptr:  velocity
1655 *
1656 *      Set up the transmit ring and chain the ring pointers together.
1657 *      Returns zero on success or a negative posix errno code for
1658 *      failure.
1659 */
1660static int velocity_init_td_ring(struct velocity_info *vptr)
1661{
1662        int j;
1663
1664        /* Init the TD ring entries */
1665        for (j = 0; j < vptr->tx.numq; j++) {
1666
1667                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1668                                            sizeof(struct velocity_td_info),
1669                                            GFP_KERNEL);
1670                if (!vptr->tx.infos[j]) {
1671                        while (--j >= 0)
1672                                kfree(vptr->tx.infos[j]);
1673                        return -ENOMEM;
1674                }
1675
1676                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1677        }
1678        return 0;
1679}
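
/*
 * Note the partial unwind in the error path above: the while (--j >= 0)
 * loop frees only the per-queue info arrays that were successfully
 * allocated before the failing kcalloc().
 */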
1680
1681/**
1682 *      velocity_free_dma_rings -       free PCI ring pointers
1683 *      @vptr: Velocity to free from
1684 *
1685 *      Clean up the PCI ring buffers allocated to this velocity.
1686 */
1687static void velocity_free_dma_rings(struct velocity_info *vptr)
1688{
1689        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1690                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1691
1692        dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1693}
1694
1695static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1696{
1697        int ret;
1698
1699        velocity_set_rxbufsize(vptr, mtu);
1700
1701        ret = velocity_init_dma_rings(vptr);
1702        if (ret < 0)
1703                goto out;
1704
1705        ret = velocity_init_rd_ring(vptr);
1706        if (ret < 0)
1707                goto err_free_dma_rings_0;
1708
1709        ret = velocity_init_td_ring(vptr);
1710        if (ret < 0)
1711                goto err_free_rd_ring_1;
1712out:
1713        return ret;
1714
1715err_free_rd_ring_1:
1716        velocity_free_rd_ring(vptr);
1717err_free_dma_rings_0:
1718        velocity_free_dma_rings(vptr);
1719        goto out;
1720}
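
/*
 * The error ladder above unwinds in reverse allocation order: a TD
 * ring failure frees the RD ring and then the DMA pool, so a caller
 * seeing a negative return owns no ring resources.
 */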
1721
1722/**
1723 *      velocity_free_tx_buf    -       free transmit buffer
1724 *      @vptr: velocity
1725 *      @tdinfo: buffer
1726 *      @td: transmit descriptor to free
1727 *
 *      Release a transmit buffer. If the buffer was preallocated then
1729 *      recycle it, if not then unmap the buffer.
1730 */
1731static void velocity_free_tx_buf(struct velocity_info *vptr,
1732                struct velocity_td_info *tdinfo, struct tx_desc *td)
1733{
1734        struct sk_buff *skb = tdinfo->skb;
1735        int i;
1736
1737        /*
1738         *      Don't unmap the pre-allocated tx_bufs
1739         */
1740        for (i = 0; i < tdinfo->nskb_dma; i++) {
1741                size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1742
                /* For scatter-gather */
                if (skb_shinfo(skb)->nr_frags > 0)
                        pktlen = max_t(size_t, pktlen,
                                       le16_to_cpu(td->td_buf[i].size & ~TD_QUEUE));

                dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
                                 pktlen, DMA_TO_DEVICE);
1750        }
1751        dev_consume_skb_irq(skb);
1752        tdinfo->skb = NULL;
1753}
1754
1755/*
1756 *      FIXME: could we merge this with velocity_free_tx_buf ?
1757 */
1758static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1759                                                         int q, int n)
1760{
1761        struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1762        int i;
1763
1764        if (td_info == NULL)
1765                return;
1766
1767        if (td_info->skb) {
1768                for (i = 0; i < td_info->nskb_dma; i++) {
1769                        if (td_info->skb_dma[i]) {
1770                                dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1771                                        td_info->skb->len, DMA_TO_DEVICE);
1772                                td_info->skb_dma[i] = 0;
1773                        }
1774                }
1775                dev_kfree_skb(td_info->skb);
1776                td_info->skb = NULL;
1777        }
1778}
1779
1780/**
1781 *      velocity_free_td_ring   -       free td ring
1782 *      @vptr: velocity
1783 *
1784 *      Free up the transmit ring for this particular velocity adapter.
1785 *      We free the ring contents but not the ring itself.
1786 */
1787static void velocity_free_td_ring(struct velocity_info *vptr)
1788{
1789        int i, j;
1790
1791        for (j = 0; j < vptr->tx.numq; j++) {
1792                if (vptr->tx.infos[j] == NULL)
1793                        continue;
1794                for (i = 0; i < vptr->options.numtx; i++)
1795                        velocity_free_td_ring_entry(vptr, j, i);
1796
1797                kfree(vptr->tx.infos[j]);
1798                vptr->tx.infos[j] = NULL;
1799        }
1800}
1801
1802static void velocity_free_rings(struct velocity_info *vptr)
1803{
1804        velocity_free_td_ring(vptr);
1805        velocity_free_rd_ring(vptr);
1806        velocity_free_dma_rings(vptr);
1807}
1808
1809/**
1810 *      velocity_error  -       handle error from controller
1811 *      @vptr: velocity
1812 *      @status: card status
1813 *
1814 *      Process an error report from the hardware and attempt to recover
1815 *      the card itself. At the moment we cannot recover from some
1816 *      theoretically impossible errors but this could be fixed using
1817 *      the pci_device_failed logic to bounce the hardware
1818 *
1819 */
1820static void velocity_error(struct velocity_info *vptr, int status)
1821{
1822
1823        if (status & ISR_TXSTLI) {
1824                struct mac_regs __iomem *regs = vptr->mac_regs;
1825
1826                netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
1827                           readw(&regs->TDIdx[0]));
1828                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1829                writew(TRDCSR_RUN, &regs->TDCSRClr);
1830                netif_stop_queue(vptr->netdev);
1831
1832                /* FIXME: port over the pci_device_failed code and use it
1833                   here */
1834        }
1835
1836        if (status & ISR_SRCI) {
1837                struct mac_regs __iomem *regs = vptr->mac_regs;
1838                int linked;
1839
1840                if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1841                        vptr->mii_status = check_connection_type(regs);
1842
                        /*
                         *      If it is a 3119, disable frame bursting in
                         *      half-duplex mode and enable it in
                         *      full-duplex mode
                         */
1848                        if (vptr->rev_id < REV_ID_VT3216_A0) {
1849                                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1850                                        BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1851                                else
1852                                        BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1853                        }
1854                        /*
1855                         *      Only enable CD heart beat counter in 10HD mode
1856                         */
1857                        if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1858                                BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1859                        else
1860                                BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1861
1862                        setup_queue_timers(vptr);
1863                }
1864                /*
1865                 *      Get link status from PHYSR0
1866                 */
1867                linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1868
1869                if (linked) {
1870                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1871                        netif_carrier_on(vptr->netdev);
1872                } else {
1873                        vptr->mii_status |= VELOCITY_LINK_FAIL;
1874                        netif_carrier_off(vptr->netdev);
1875                }
1876
1877                velocity_print_link_status(vptr);
1878                enable_flow_control_ability(vptr);
1879
1880                /*
1881                 *      Re-enable auto-polling because SRCI will disable
1882                 *      auto-polling
1883                 */
1884
1885                enable_mii_autopoll(regs);
1886
1887                if (vptr->mii_status & VELOCITY_LINK_FAIL)
1888                        netif_stop_queue(vptr->netdev);
1889                else
1890                        netif_wake_queue(vptr->netdev);
1891
1892        }
1893        if (status & ISR_MIBFI)
1894                velocity_update_hw_mibs(vptr);
1895        if (status & ISR_LSTEI)
1896                mac_rx_queue_wake(vptr->mac_regs);
1897}
1898
1899/**
1900 *      velocity_tx_srv         -       transmit interrupt service
1901 *      @vptr: Velocity
1902 *
1903 *      Scan the queues looking for transmitted packets that
1904 *      we can complete and clean up. Update any statistics as
 *      necessary.
1906 */
1907static int velocity_tx_srv(struct velocity_info *vptr)
1908{
1909        struct tx_desc *td;
1910        int qnum;
1911        int full = 0;
1912        int idx;
1913        int works = 0;
1914        struct velocity_td_info *tdinfo;
1915        struct net_device_stats *stats = &vptr->netdev->stats;
1916
1917        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1918                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1919                        idx = (idx + 1) % vptr->options.numtx) {
1920
1921                        /*
1922                         *      Get Tx Descriptor
1923                         */
1924                        td = &(vptr->tx.rings[qnum][idx]);
1925                        tdinfo = &(vptr->tx.infos[qnum][idx]);
1926
1927                        if (td->tdesc0.len & OWNED_BY_NIC)
1928                                break;
1929
                        if (works++ > 15)
1931                                break;
1932
1933                        if (td->tdesc0.TSR & TSR0_TERR) {
1934                                stats->tx_errors++;
1935                                stats->tx_dropped++;
1936                                if (td->tdesc0.TSR & TSR0_CDH)
1937                                        stats->tx_heartbeat_errors++;
1938                                if (td->tdesc0.TSR & TSR0_CRS)
1939                                        stats->tx_carrier_errors++;
1940                                if (td->tdesc0.TSR & TSR0_ABT)
1941                                        stats->tx_aborted_errors++;
1942                                if (td->tdesc0.TSR & TSR0_OWC)
1943                                        stats->tx_window_errors++;
1944                        } else {
1945                                stats->tx_packets++;
1946                                stats->tx_bytes += tdinfo->skb->len;
1947                        }
1948                        velocity_free_tx_buf(vptr, tdinfo, td);
1949                        vptr->tx.used[qnum]--;
1950                }
1951                vptr->tx.tail[qnum] = idx;
1952
1953                if (AVAIL_TD(vptr, qnum) < 1)
1954                        full = 1;
1955        }
1956        /*
1957         *      Look to see if we should kick the transmit network
1958         *      layer for more work.
1959         */
1960        if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1961            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1962                netif_wake_queue(vptr->netdev);
1963        }
1964        return works;
1965}
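
/*
 * AVAIL_TD() (from via-velocity.h) evaluates the number of unused
 * descriptors in a TX queue; the queue is woken above only when every
 * queue has at least one free slot, the queue was stopped, and the
 * link is up.
 */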
1966
1967/**
1968 *      velocity_rx_csum        -       checksum process
1969 *      @rd: receive packet descriptor
1970 *      @skb: network layer packet buffer
1971 *
1972 *      Process the status bits for the received packet and determine
1973 *      if the checksum was computed and verified by the hardware
1974 */
1975static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1976{
1977        skb_checksum_none_assert(skb);
1978
1979        if (rd->rdesc1.CSM & CSM_IPKT) {
1980                if (rd->rdesc1.CSM & CSM_IPOK) {
1981                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1982                                        (rd->rdesc1.CSM & CSM_UDPKT)) {
1983                                if (!(rd->rdesc1.CSM & CSM_TUPOK))
1984                                        return;
1985                        }
1986                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1987                }
1988        }
1989}
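
/*
 * Decision summary for the CSM bits tested above: an IP packet whose
 * IP checksum verified is marked CHECKSUM_UNNECESSARY, unless it is
 * TCP or UDP with a failed layer-4 checksum, in which case the skb is
 * left at CHECKSUM_NONE and the stack re-verifies it in software.
 */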
1990
1991/**
1992 *      velocity_rx_copy        -       in place Rx copy for small packets
1993 *      @rx_skb: network layer packet buffer candidate
1994 *      @pkt_size: received data size
1995 *      @vptr: velocity adapter
1996 *
1997 *      Replace the current skb that is scheduled for Rx processing by a
1998 *      shorter, immediately allocated skb, if the received packet is small
1999 *      enough. This function returns a negative value if the received
2000 *      packet is too big or if memory is exhausted.
2001 */
2002static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2003                            struct velocity_info *vptr)
2004{
2005        int ret = -1;
2006        if (pkt_size < rx_copybreak) {
2007                struct sk_buff *new_skb;
2008
2009                new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2010                if (new_skb) {
2011                        new_skb->ip_summed = rx_skb[0]->ip_summed;
2012                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2013                        *rx_skb = new_skb;
2014                        ret = 0;
2015                }
2016
2017        }
2018        return ret;
2019}
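
/*
 * rx_copybreak trades a short memcpy for keeping the original
 * full-sized receive buffer (and its streaming DMA mapping) in place:
 * small frames are duplicated into a right-sized skb while the large
 * buffer stays in the ring, and only frames at or above the threshold
 * take the original skb up the stack.
 */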
2020
2021/**
2022 *      velocity_iph_realign    -       IP header alignment
2023 *      @vptr: velocity we are handling
2024 *      @skb: network layer packet buffer
2025 *      @pkt_size: received data size
2026 *
 *      Align the IP header on a 2-byte boundary. This behavior can be
2028 *      configured by the user.
2029 */
2030static inline void velocity_iph_realign(struct velocity_info *vptr,
2031                                        struct sk_buff *skb, int pkt_size)
2032{
2033        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2034                memmove(skb->data + 2, skb->data, pkt_size);
2035                skb_reserve(skb, 2);
2036        }
2037}
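
/*
 * Why the 2-byte shift works: the 14-byte Ethernet header leaves the
 * IP header at offset 14 of a 64-byte aligned buffer, i.e. 2 mod 4.
 * Moving the frame up by two puts the IP header on a 4-byte boundary
 * for the stack's word accesses, at the cost of a memmove per packet,
 * hence the opt-in VELOCITY_FLAGS_IP_ALIGN flag.
 */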
2038
2039/**
2040 *      velocity_receive_frame  -       received packet processor
2041 *      @vptr: velocity we are handling
2042 *      @idx: ring index
2043 *
2044 *      A packet has arrived. We process the packet and if appropriate
2045 *      pass the frame up the network stack
2046 */
2047static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2048{
2049        struct net_device_stats *stats = &vptr->netdev->stats;
2050        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2051        struct rx_desc *rd = &(vptr->rx.ring[idx]);
2052        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2053        struct sk_buff *skb;
2054
2055        if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2056                if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2057                        netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
2058                stats->rx_length_errors++;
2059                return -EINVAL;
2060        }
2061
2062        if (rd->rdesc0.RSR & RSR_MAR)
2063                stats->multicast++;
2064
2065        skb = rd_info->skb;
2066
2067        dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2068                                    vptr->rx.buf_sz, DMA_FROM_DEVICE);
2069
2070        velocity_rx_csum(rd, skb);
2071
2072        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2073                velocity_iph_realign(vptr, skb, pkt_len);
2074                rd_info->skb = NULL;
2075                dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2076                                 DMA_FROM_DEVICE);
2077        } else {
2078                dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2079                                           vptr->rx.buf_sz, DMA_FROM_DEVICE);
2080        }
2081
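        /* rdesc0.len includes the 4-byte frame checksum (FCS); trim it */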
2082        skb_put(skb, pkt_len - 4);
2083        skb->protocol = eth_type_trans(skb, vptr->netdev);
2084
2085        if (rd->rdesc0.RSR & RSR_DETAG) {
2086                u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2087
2088                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2089        }
2090        netif_receive_skb(skb);
2091
2092        stats->rx_bytes += pkt_len;
2093        stats->rx_packets++;
2094
2095        return 0;
2096}
2097
2098/**
2099 *      velocity_rx_srv         -       service RX interrupt
2100 *      @vptr: velocity
2101 *      @budget_left: remaining budget
2102 *
2103 *      Walk the receive ring of the velocity adapter and remove
2104 *      any received packets from the receive queue. Hand the ring
2105 *      slots back to the adapter for reuse.
2106 */
2107static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2108{
2109        struct net_device_stats *stats = &vptr->netdev->stats;
2110        int rd_curr = vptr->rx.curr;
2111        int works = 0;
2112
2113        while (works < budget_left) {
2114                struct rx_desc *rd = vptr->rx.ring + rd_curr;
2115
2116                if (!vptr->rx.info[rd_curr].skb)
2117                        break;
2118
2119                if (rd->rdesc0.len & OWNED_BY_NIC)
2120                        break;
2121
2122                rmb();
2123
                /*
                 *      Don't drop frames that only have CE or RL errors
                 *      even though RXOK is clear
                 */
2127                if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2128                        if (velocity_receive_frame(vptr, rd_curr) < 0)
2129                                stats->rx_dropped++;
2130                } else {
2131                        if (rd->rdesc0.RSR & RSR_CRC)
2132                                stats->rx_crc_errors++;
2133                        if (rd->rdesc0.RSR & RSR_FAE)
2134                                stats->rx_frame_errors++;
2135
2136                        stats->rx_dropped++;
2137                }
2138
2139                rd->size |= RX_INTEN;
2140
2141                rd_curr++;
2142                if (rd_curr >= vptr->options.numrx)
2143                        rd_curr = 0;
2144                works++;
2145        }
2146
2147        vptr->rx.curr = rd_curr;
2148
2149        if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2150                velocity_give_many_rx_descs(vptr);
2151
2152        VAR_USED(stats);
2153        return works;
2154}
2155
2156static int velocity_poll(struct napi_struct *napi, int budget)
2157{
2158        struct velocity_info *vptr = container_of(napi,
2159                        struct velocity_info, napi);
2160        unsigned int rx_done;
2161        unsigned long flags;
2162
        /*
         * Service the RX ring first, then reclaim TX descriptors under
         * the adapter lock (scheme derived from the VIA out-of-tree
         * driver).
         */
2167        rx_done = velocity_rx_srv(vptr, budget);
2168        spin_lock_irqsave(&vptr->lock, flags);
2169        velocity_tx_srv(vptr);
2170        /* If budget not fully consumed, exit the polling mode */
2171        if (rx_done < budget) {
2172                napi_complete_done(napi, rx_done);
2173                mac_enable_int(vptr->mac_regs);
2174        }
2175        spin_unlock_irqrestore(&vptr->lock, flags);
2176
2177        return rx_done;
2178}
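
/*
 * Standard NAPI sequencing, for reference: velocity_intr() masks chip
 * interrupts before scheduling the poller, so RX/TX servicing runs
 * from softirq context with interrupts quiet; only when less than the
 * full budget is consumed does the poller call napi_complete_done()
 * and unmask, letting a saturated link stay in polled mode.
 */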
2179
2180/**
2181 *      velocity_intr           -       interrupt callback
2182 *      @irq: interrupt number
2183 *      @dev_instance: interrupting device
2184 *
2185 *      Called whenever an interrupt is generated by the velocity
 *      adapter IRQ line. We may not be the source of the interrupt,
 *      so we first check whether we are and, if not, exit as
 *      efficiently as possible.
2189 */
2190static irqreturn_t velocity_intr(int irq, void *dev_instance)
2191{
2192        struct net_device *dev = dev_instance;
2193        struct velocity_info *vptr = netdev_priv(dev);
2194        u32 isr_status;
2195
2196        spin_lock(&vptr->lock);
2197        isr_status = mac_read_isr(vptr->mac_regs);
2198
2199        /* Not us ? */
2200        if (isr_status == 0) {
2201                spin_unlock(&vptr->lock);
2202                return IRQ_NONE;
2203        }
2204
2205        /* Ack the interrupt */
2206        mac_write_isr(vptr->mac_regs, isr_status);
2207
2208        if (likely(napi_schedule_prep(&vptr->napi))) {
2209                mac_disable_int(vptr->mac_regs);
2210                __napi_schedule(&vptr->napi);
2211        }
2212
2213        if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2214                velocity_error(vptr, isr_status);
2215
2216        spin_unlock(&vptr->lock);
2217
2218        return IRQ_HANDLED;
2219}
2220
2221/**
2222 *      velocity_open           -       interface activation callback
2223 *      @dev: network layer device to open
2224 *
2225 *      Called when the network layer brings the interface up. Returns
2226 *      a negative posix error code on failure, or zero on success.
2227 *
2228 *      All the ring allocation and set up is done on open for this
2229 *      adapter to minimise memory usage when inactive
2230 */
2231static int velocity_open(struct net_device *dev)
2232{
2233        struct velocity_info *vptr = netdev_priv(dev);
2234        int ret;
2235
2236        ret = velocity_init_rings(vptr, dev->mtu);
2237        if (ret < 0)
2238                goto out;
2239
2240        /* Ensure chip is running */
2241        velocity_set_power_state(vptr, PCI_D0);
2242
2243        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2244
2245        ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2246                          dev->name, dev);
2247        if (ret < 0) {
2248                /* Power down the chip */
2249                velocity_set_power_state(vptr, PCI_D3hot);
2250                velocity_free_rings(vptr);
2251                goto out;
2252        }
2253
2254        velocity_give_many_rx_descs(vptr);
2255
2256        mac_enable_int(vptr->mac_regs);
2257        netif_start_queue(dev);
2258        napi_enable(&vptr->napi);
2259        vptr->flags |= VELOCITY_FLAGS_OPENED;
2260out:
2261        return ret;
2262}
2263
2264/**
2265 *      velocity_shutdown       -       shut down the chip
2266 *      @vptr: velocity to deactivate
2267 *
2268 *      Shuts down the internal operations of the velocity and
2269 *      disables interrupts, autopolling, transmit and receive
2270 */
2271static void velocity_shutdown(struct velocity_info *vptr)
2272{
2273        struct mac_regs __iomem *regs = vptr->mac_regs;
2274        mac_disable_int(regs);
2275        writel(CR0_STOP, &regs->CR0Set);
2276        writew(0xFFFF, &regs->TDCSRClr);
2277        writeb(0xFF, &regs->RDCSRClr);
2278        safe_disable_mii_autopoll(regs);
2279        mac_clear_isr(regs);
2280}
2281
2282/**
2283 *      velocity_change_mtu     -       MTU change callback
2284 *      @dev: network device
2285 *      @new_mtu: desired MTU
2286 *
 *      Handle requests from the network layer for an MTU change on
 *      this interface. Returns zero for success or a negative posix
 *      error code.
2290 */
2291static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2292{
2293        struct velocity_info *vptr = netdev_priv(dev);
2294        int ret = 0;
2295
2296        if (!netif_running(dev)) {
2297                dev->mtu = new_mtu;
2298                goto out_0;
2299        }
2300
2301        if (dev->mtu != new_mtu) {
2302                struct velocity_info *tmp_vptr;
2303                unsigned long flags;
2304                struct rx_info rx;
2305                struct tx_info tx;
2306
2307                tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2308                if (!tmp_vptr) {
2309                        ret = -ENOMEM;
2310                        goto out_0;
2311                }
2312
2313                tmp_vptr->netdev = dev;
2314                tmp_vptr->pdev = vptr->pdev;
2315                tmp_vptr->dev = vptr->dev;
2316                tmp_vptr->options = vptr->options;
2317                tmp_vptr->tx.numq = vptr->tx.numq;
2318
2319                ret = velocity_init_rings(tmp_vptr, new_mtu);
2320                if (ret < 0)
2321                        goto out_free_tmp_vptr_1;
2322
2323                napi_disable(&vptr->napi);
2324
2325                spin_lock_irqsave(&vptr->lock, flags);
2326
2327                netif_stop_queue(dev);
2328                velocity_shutdown(vptr);
2329
2330                rx = vptr->rx;
2331                tx = vptr->tx;
2332
2333                vptr->rx = tmp_vptr->rx;
2334                vptr->tx = tmp_vptr->tx;
2335
2336                tmp_vptr->rx = rx;
2337                tmp_vptr->tx = tx;
2338
2339                dev->mtu = new_mtu;
2340
2341                velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2342
2343                velocity_give_many_rx_descs(vptr);
2344
2345                napi_enable(&vptr->napi);
2346
2347                mac_enable_int(vptr->mac_regs);
2348                netif_start_queue(dev);
2349
2350                spin_unlock_irqrestore(&vptr->lock, flags);
2351
2352                velocity_free_rings(tmp_vptr);
2353
2354out_free_tmp_vptr_1:
2355                kfree(tmp_vptr);
2356        }
2357out_0:
2358        return ret;
2359}
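
/*
 * The MTU change above uses a ring-swap trick: rings sized for the new
 * MTU are built in a throwaway velocity_info, exchanged with the live
 * ones under the lock while the device is quiesced, and the old rings
 * are freed outside the critical section through the temporary.
 */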
2360
2361#ifdef CONFIG_NET_POLL_CONTROLLER
2362/**
 *  velocity_poll_controller            -       Velocity Poll controller function
 *  @dev: network device
 *
 *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
 *  with interrupts disabled.
2369 */
2370static void velocity_poll_controller(struct net_device *dev)
2371{
2372        disable_irq(dev->irq);
2373        velocity_intr(dev->irq, dev);
2374        enable_irq(dev->irq);
2375}
2376#endif
2377
2378/**
2379 *      velocity_mii_ioctl              -       MII ioctl handler
2380 *      @dev: network device
2381 *      @ifr: the ifreq block for the ioctl
2382 *      @cmd: the command
2383 *
2384 *      Process MII requests made via ioctl from the network layer. These
2385 *      are used by tools like kudzu to interrogate the link state of the
2386 *      hardware
2387 */
2388static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2389{
2390        struct velocity_info *vptr = netdev_priv(dev);
2391        struct mac_regs __iomem *regs = vptr->mac_regs;
2392        unsigned long flags;
2393        struct mii_ioctl_data *miidata = if_mii(ifr);
2394        int err;
2395
2396        switch (cmd) {
2397        case SIOCGMIIPHY:
2398                miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2399                break;
2400        case SIOCGMIIREG:
2401                if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2402                        return -ETIMEDOUT;
2403                break;
2404        case SIOCSMIIREG:
2405                spin_lock_irqsave(&vptr->lock, flags);
2406                err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2407                spin_unlock_irqrestore(&vptr->lock, flags);
2408                check_connection_type(vptr->mac_regs);
2409                if (err)
2410                        return err;
2411                break;
2412        default:
2413                return -EOPNOTSUPP;
2414        }
2415        return 0;
2416}
2417
2418/**
2419 *      velocity_ioctl          -       ioctl entry point
2420 *      @dev: network device
2421 *      @rq: interface request ioctl
2422 *      @cmd: command code
2423 *
2424 *      Called when the user issues an ioctl request to the network
2425 *      device in question. The velocity interface supports MII.
2426 */
2427static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428{
2429        struct velocity_info *vptr = netdev_priv(dev);
2430        int ret;
2431
        /* If we are asked for information and the device is power
           saving, we need to bring the device back up to talk to it */
2434
2435        if (!netif_running(dev))
2436                velocity_set_power_state(vptr, PCI_D0);
2437
2438        switch (cmd) {
2439        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
2440        case SIOCGMIIREG:       /* Read MII PHY register. */
2441        case SIOCSMIIREG:       /* Write to MII PHY register. */
2442                ret = velocity_mii_ioctl(dev, rq, cmd);
2443                break;
2444
2445        default:
2446                ret = -EOPNOTSUPP;
2447        }
2448        if (!netif_running(dev))
2449                velocity_set_power_state(vptr, PCI_D3hot);
2450
2451
2452        return ret;
2453}
2454
2455/**
2456 *      velocity_get_stats      -       statistics callback
2457 *      @dev: network device
2458 *
2459 *      Callback from the network layer to allow driver statistics
2460 *      to be resynchronized with hardware collected state. In the
2461 *      case of the velocity we need to pull the MIB counters from
2462 *      the hardware into the counters before letting the network
2463 *      layer display them.
2464 */
2465static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2466{
2467        struct velocity_info *vptr = netdev_priv(dev);
2468
2469        /* If the hardware is down, don't touch MII */
2470        if (!netif_running(dev))
2471                return &dev->stats;
2472
2473        spin_lock_irq(&vptr->lock);
2474        velocity_update_hw_mibs(vptr);
2475        spin_unlock_irq(&vptr->lock);
2476
2477        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2478        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2479        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2480
        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];

        /*
         * The remaining net_device_stats fields (rx_dropped,
         * rx_over_errors, rx_frame_errors, rx_fifo_errors,
         * rx_missed_errors and tx_fifo_errors) are not sourced from
         * the hardware MIB counters.
         */
2493
2494        return &dev->stats;
2495}
2496
2497/**
2498 *      velocity_close          -       close adapter callback
2499 *      @dev: network device
2500 *
2501 *      Callback from the network layer when the velocity is being
2502 *      deactivated by the network layer
2503 */
2504static int velocity_close(struct net_device *dev)
2505{
2506        struct velocity_info *vptr = netdev_priv(dev);
2507
2508        napi_disable(&vptr->napi);
2509        netif_stop_queue(dev);
2510        velocity_shutdown(vptr);
2511
2512        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2513                velocity_get_ip(vptr);
2514
2515        free_irq(dev->irq, dev);
2516
2517        velocity_free_rings(vptr);
2518
2519        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2520        return 0;
2521}
2522
2523/**
2524 *      velocity_xmit           -       transmit packet callback
2525 *      @skb: buffer to transmit
2526 *      @dev: network device
2527 *
2528 *      Called by the network layer to request a packet is queued to
2529 *      the velocity. Returns zero on success.
2530 */
2531static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2532                                 struct net_device *dev)
2533{
2534        struct velocity_info *vptr = netdev_priv(dev);
2535        int qnum = 0;
2536        struct tx_desc *td_ptr;
2537        struct velocity_td_info *tdinfo;
2538        unsigned long flags;
2539        int pktlen;
2540        int index, prev;
2541        int i = 0;
2542
2543        if (skb_padto(skb, ETH_ZLEN))
2544                goto out;
2545
2546        /* The hardware can handle at most 7 memory segments, so merge
2547         * the skb if there are more */
2548        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2549                dev_kfree_skb_any(skb);
2550                return NETDEV_TX_OK;
2551        }
2552
2553        pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2554                        max_t(unsigned int, skb->len, ETH_ZLEN) :
2555                                skb_headlen(skb);
2556
2557        spin_lock_irqsave(&vptr->lock, flags);
2558
2559        index = vptr->tx.curr[qnum];
2560        td_ptr = &(vptr->tx.rings[qnum][index]);
2561        tdinfo = &(vptr->tx.infos[qnum][index]);
2562
2563        td_ptr->tdesc1.TCR = TCR0_TIC;
2564        td_ptr->td_buf[0].size &= ~TD_QUEUE;
2565
2566        /*
2567         *      Map the linear network buffer into PCI space and
2568         *      add it to the transmit ring.
2569         */
2570        tdinfo->skb = skb;
2571        tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2572                                                                DMA_TO_DEVICE);
2573        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2574        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2575        td_ptr->td_buf[0].pa_high = 0;
2576        td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2577
2578        /* Handle fragments */
2579        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2580                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2581
2582                tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2583                                                          frag, 0,
2584                                                          skb_frag_size(frag),
2585                                                          DMA_TO_DEVICE);
2586
2587                td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2588                td_ptr->td_buf[i + 1].pa_high = 0;
2589                td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2590        }
2591        tdinfo->nskb_dma = i + 1;
2592
2593        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2594
2595        if (skb_vlan_tag_present(skb)) {
2596                td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2597                td_ptr->tdesc1.TCR |= TCR0_VETAG;
2598        }
2599
2600        /*
2601         *      Handle hardware checksum
2602         */
2603        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2604                const struct iphdr *ip = ip_hdr(skb);
2605                if (ip->protocol == IPPROTO_TCP)
2606                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2607                else if (ip->protocol == IPPROTO_UDP)
2608                        td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2609                td_ptr->tdesc1.TCR |= TCR0_IPCK;
2610        }
2611
2612        prev = index - 1;
2613        if (prev < 0)
2614                prev = vptr->options.numtx - 1;
2615        td_ptr->tdesc0.len |= OWNED_BY_NIC;
2616        vptr->tx.used[qnum]++;
2617        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2618
2619        if (AVAIL_TD(vptr, qnum) < 1)
2620                netif_stop_queue(dev);
2621
2622        td_ptr = &(vptr->tx.rings[qnum][prev]);
2623        td_ptr->td_buf[0].size |= TD_QUEUE;
2624        mac_tx_queue_wake(vptr->mac_regs, qnum);
2625
2626        spin_unlock_irqrestore(&vptr->lock, flags);
2627out:
2628        return NETDEV_TX_OK;
2629}
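
/*
 * Descriptor chaining as used above (inferred from the code): TD_QUEUE
 * set on a descriptor's head buffer tells the chip another frame
 * follows. Each new packet is queued with its own TD_QUEUE bit clear,
 * making it the tail, and the bit is then set on the previous
 * descriptor to link it in before the queue is kicked.
 */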
2630
2631static const struct net_device_ops velocity_netdev_ops = {
2632        .ndo_open               = velocity_open,
2633        .ndo_stop               = velocity_close,
2634        .ndo_start_xmit         = velocity_xmit,
2635        .ndo_get_stats          = velocity_get_stats,
2636        .ndo_validate_addr      = eth_validate_addr,
2637        .ndo_set_mac_address    = eth_mac_addr,
2638        .ndo_set_rx_mode        = velocity_set_multi,
2639        .ndo_change_mtu         = velocity_change_mtu,
2640        .ndo_eth_ioctl          = velocity_ioctl,
2641        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
2642        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
2643#ifdef CONFIG_NET_POLL_CONTROLLER
2644        .ndo_poll_controller = velocity_poll_controller,
2645#endif
2646};
2647
2648/**
2649 *      velocity_init_info      -       init private data
2650 *      @vptr: Velocity info
2651 *      @info: Board type
2652 *
2653 *      Set up the initial velocity_info struct for the device that has been
2654 *      discovered.
2655 */
2656static void velocity_init_info(struct velocity_info *vptr,
2657                                const struct velocity_info_tbl *info)
2658{
2659        vptr->chip_id = info->chip_id;
2660        vptr->tx.numq = info->txqueue;
2661        vptr->multicast_limit = MCAM_SIZE;
2662        spin_lock_init(&vptr->lock);
2663}
2664
2665/**
2666 *      velocity_get_pci_info   -       retrieve PCI info for device
2667 *      @vptr: velocity device
2668 *
2669 *      Retrieve the PCI configuration space data that interests us from
2670 *      the kernel PCI layer
2671 */
2672static int velocity_get_pci_info(struct velocity_info *vptr)
2673{
2674        struct pci_dev *pdev = vptr->pdev;
2675
2676        pci_set_master(pdev);
2677
2678        vptr->ioaddr = pci_resource_start(pdev, 0);
2679        vptr->memaddr = pci_resource_start(pdev, 1);
2680
2681        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2682                dev_err(&pdev->dev,
2683                           "region #0 is not an I/O resource, aborting.\n");
2684                return -EINVAL;
2685        }
2686
2687        if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2688                dev_err(&pdev->dev,
2689                           "region #1 is an I/O resource, aborting.\n");
2690                return -EINVAL;
2691        }
2692
2693        if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2694                dev_err(&pdev->dev, "region #1 is too small.\n");
2695                return -EINVAL;
2696        }
2697
2698        return 0;
2699}
2700
2701/**
2702 *      velocity_get_platform_info - retrieve platform info for device
2703 *      @vptr: velocity device
2704 *
2705 *      Retrieve the Platform configuration data that interests us
2706 */
2707static int velocity_get_platform_info(struct velocity_info *vptr)
2708{
2709        struct resource res;
2710        int ret;
2711
2712        if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2713                vptr->no_eeprom = 1;
2714
2715        ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2716        if (ret) {
2717                dev_err(vptr->dev, "unable to find memory address\n");
2718                return ret;
2719        }
2720
2721        vptr->memaddr = res.start;
2722
2723        if (resource_size(&res) < VELOCITY_IO_SIZE) {
2724                dev_err(vptr->dev, "memory region is too small.\n");
2725                return -EINVAL;
2726        }
2727
2728        return 0;
2729}
2730
2731/**
2732 *      velocity_print_info     -       per driver data
2733 *      @vptr: velocity
2734 *
2735 *      Print per driver data as the kernel driver finds Velocity
2736 *      hardware
2737 */
2738static void velocity_print_info(struct velocity_info *vptr)
2739{
2740        netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2741                    get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2742}
2743
2744static u32 velocity_get_link(struct net_device *dev)
2745{
2746        struct velocity_info *vptr = netdev_priv(dev);
2747        struct mac_regs __iomem *regs = vptr->mac_regs;
2748        return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2749}
2750
2751/**
2752 *      velocity_probe - set up discovered velocity device
 *      @dev: device (PCI or platform)
 *      @irq: interrupt number
 *      @info: chip info table entry for the matched device
2756 *      @bustype: bus that device is connected to
2757 *
2758 *      Configure a discovered adapter from scratch. Return a negative
2759 *      errno error code on failure paths.
2760 */
2761static int velocity_probe(struct device *dev, int irq,
2762                           const struct velocity_info_tbl *info,
2763                           enum velocity_bus_type bustype)
2764{
2765        struct net_device *netdev;
2766        int i;
2767        struct velocity_info *vptr;
2768        struct mac_regs __iomem *regs;
2769        int ret = -ENOMEM;
2770
2771        /* FIXME: this driver, like almost all other ethernet drivers,
2772         * can support more than MAX_UNITS.
2773         */
2774        if (velocity_nics >= MAX_UNITS) {
2775                dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2776                return -ENODEV;
2777        }
2778
2779        netdev = alloc_etherdev(sizeof(struct velocity_info));
2780        if (!netdev)
2781                goto out;
2782
2783        /* Chain it all together */
2784
2785        SET_NETDEV_DEV(netdev, dev);
2786        vptr = netdev_priv(netdev);
2787
2788        pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2789        pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2790        pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
2791
2792        netdev->irq = irq;
2793        vptr->netdev = netdev;
2794        vptr->dev = dev;
2795
2796        velocity_init_info(vptr, info);
2797
2798        if (bustype == BUS_PCI) {
2799                vptr->pdev = to_pci_dev(dev);
2800
2801                ret = velocity_get_pci_info(vptr);
2802                if (ret < 0)
2803                        goto err_free_dev;
2804        } else {
2805                vptr->pdev = NULL;
2806                ret = velocity_get_platform_info(vptr);
2807                if (ret < 0)
2808                        goto err_free_dev;
2809        }
2810
2811        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2812        if (regs == NULL) {
2813                ret = -EIO;
2814                goto err_free_dev;
2815        }
2816
2817        vptr->mac_regs = regs;
2818        vptr->rev_id = readb(&regs->rev_id);
2819
2820        mac_wol_reset(regs);
2821
2822        for (i = 0; i < 6; i++)
2823                netdev->dev_addr[i] = readb(&regs->PAR[i]);
2824
2825
2826        velocity_get_options(&vptr->options, velocity_nics);
2827
2828        /*
         *      Mask out the options that cannot be set on this chip
2830         */
2831
2832        vptr->options.flags &= info->flags;
2833
2834        /*
         *      Enable the chip-specified capabilities
2836         */
2837
2838        vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2839
2840        vptr->wol_opts = vptr->options.wol_opts;
2841        vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2842
2843        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2844
2845        netdev->netdev_ops = &velocity_netdev_ops;
2846        netdev->ethtool_ops = &velocity_ethtool_ops;
2847        netif_napi_add(netdev, &vptr->napi, velocity_poll,
2848                                                        VELOCITY_NAPI_WEIGHT);
2849
2850        netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2851                           NETIF_F_HW_VLAN_CTAG_TX;
2852        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2853                        NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2854                        NETIF_F_IP_CSUM;
2855
2856        /* MTU range: 64 - 9000 */
2857        netdev->min_mtu = VELOCITY_MIN_MTU;
2858        netdev->max_mtu = VELOCITY_MAX_MTU;
2859
2860        ret = register_netdev(netdev);
2861        if (ret < 0)
2862                goto err_iounmap;
2863
2864        if (!velocity_get_link(netdev)) {
2865                netif_carrier_off(netdev);
2866                vptr->mii_status |= VELOCITY_LINK_FAIL;
2867        }
2868
2869        velocity_print_info(vptr);
2870        dev_set_drvdata(vptr->dev, netdev);
2871
2872        /* and leave the chip powered down */
2873
2874        velocity_set_power_state(vptr, PCI_D3hot);
2875        velocity_nics++;
2876out:
2877        return ret;
2878
2879err_iounmap:
2880        netif_napi_del(&vptr->napi);
2881        iounmap(regs);
2882err_free_dev:
2883        free_netdev(netdev);
2884        goto out;
2885}
2886
2887/**
2888 *      velocity_remove - device unplug
2889 *      @dev: device being removed
2890 *
2891 *      Device unload callback. Called on an unplug or on module
2892 *      unload for each active device that is present. Disconnects
2893 *      the device from the network layer and frees all the resources
2894 */
2895static int velocity_remove(struct device *dev)
2896{
2897        struct net_device *netdev = dev_get_drvdata(dev);
2898        struct velocity_info *vptr = netdev_priv(netdev);
2899
2900        unregister_netdev(netdev);
2901        netif_napi_del(&vptr->napi);
2902        iounmap(vptr->mac_regs);
2903        free_netdev(netdev);
2904        velocity_nics--;
2905
2906        return 0;
2907}
2908
2909static int velocity_pci_probe(struct pci_dev *pdev,
2910                               const struct pci_device_id *ent)
2911{
2912        const struct velocity_info_tbl *info =
2913                                        &chip_info_table[ent->driver_data];
2914        int ret;
2915
2916        ret = pci_enable_device(pdev);
2917        if (ret < 0)
2918                return ret;
2919
2920        ret = pci_request_regions(pdev, VELOCITY_NAME);
2921        if (ret < 0) {
2922                dev_err(&pdev->dev, "No PCI resources.\n");
2923                goto fail1;
2924        }
2925
2926        ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2927        if (ret == 0)
2928                return 0;
2929
2930        pci_release_regions(pdev);
2931fail1:
2932        pci_disable_device(pdev);
2933        return ret;
2934}
2935
2936static void velocity_pci_remove(struct pci_dev *pdev)
2937{
2938        velocity_remove(&pdev->dev);
2939
2940        pci_release_regions(pdev);
2941        pci_disable_device(pdev);
2942}
2943
2944static int velocity_platform_probe(struct platform_device *pdev)
2945{
2946        const struct velocity_info_tbl *info;
2947        int irq;
2948
2949        info = of_device_get_match_data(&pdev->dev);
2950        if (!info)
2951                return -EINVAL;
2952
2953        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2954        if (!irq)
2955                return -EINVAL;
2956
2957        return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2958}
2959
2960static int velocity_platform_remove(struct platform_device *pdev)
2961{
2962        velocity_remove(&pdev->dev);
2963
2964        return 0;
2965}
2966
2967#ifdef CONFIG_PM_SLEEP
2968/**
2969 *      wol_calc_crc            -       WOL CRC
2970 *      @size: size of the wake mask
2971 *      @pattern: data pattern
2972 *      @mask_pattern: mask
2973 *
2974 *      Compute the wake on lan crc hashes for the packet header
2975 *      we are interested in.
2976 */
2977static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2978{
2979        u16 crc = 0xFFFF;
2980        u8 mask;
2981        int i, j;
2982
2983        for (i = 0; i < size; i++) {
2984                mask = mask_pattern[i];
2985
                /* Skip this byte if its mask is zero */
2987                if (mask == 0x00)
2988                        continue;
2989
2990                for (j = 0; j < 8; j++) {
2991                        if ((mask & 0x01) == 0) {
2992                                mask >>= 1;
2993                                continue;
2994                        }
2995                        mask >>= 1;
2996                        crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2997                }
2998        }
2999        /*      Finally, invert the result once to get the correct data */
3000        crc = ~crc;
3001        return bitrev32(crc) >> 16;
3002}
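
/*
 * Only pattern bytes whose mask bit is set contribute to the CRC,
 * mirroring how the chip applies its wake-up byte masks. The final
 * bitrev32(crc) >> 16 is equivalent to bit-reversing the 16-bit CCITT
 * result: the low 16 CRC bits land, reversed, in the top half of the
 * 32-bit word and are shifted back down.
 */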
3003
3004/**
3005 *      velocity_set_wol        -       set up for wake on lan
3006 *      @vptr: velocity to set WOL status on
3007 *
3008 *      Set a card up for wake on lan either by unicast or by
3009 *      ARP packet.
3010 *
3011 *      FIXME: check static buffer is safe here
3012 */
3013static int velocity_set_wol(struct velocity_info *vptr)
3014{
3015        struct mac_regs __iomem *regs = vptr->mac_regs;
3016        enum speed_opt spd_dpx = vptr->options.spd_dpx;
3017        static u8 buf[256];
3018        int i;
3019
3020        static u32 mask_pattern[2][4] = {
3021                {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
3022                {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}  /* Magic Packet */
3023        };
3024
3025        writew(0xFFFF, &regs->WOLCRClr);
3026        writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3027        writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3028
3029        /*
3030           if (vptr->wol_opts & VELOCITY_WOL_PHY)
3031           writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3032         */
3033
3034        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3035                writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3036
3037        if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3038                struct arp_packet *arp = (struct arp_packet *) buf;
3039                u16 crc;
3040                memset(buf, 0, sizeof(struct arp_packet) + 7);
3041
3042                for (i = 0; i < 4; i++)
3043                        writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3044
3045                arp->type = htons(ETH_P_ARP);
3046                arp->ar_op = htons(1);
3047
3048                memcpy(arp->ar_tip, vptr->ip_addr, 4);
3049
3050                crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3051                                (u8 *) & mask_pattern[0][0]);
3052
3053                writew(crc, &regs->PatternCRC[0]);
3054                writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3055        }
3056
3057        BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3058        BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3059
3060        writew(0x0FFF, &regs->WOLSRClr);
3061
3062        if (spd_dpx == SPD_DPX_1000_FULL)
3063                goto mac_done;
3064
3065        if (spd_dpx != SPD_DPX_AUTO)
3066                goto advertise_done;
3067
3068        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3069                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3070                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3071
3072                MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3073        }
3074
3075        if (vptr->mii_status & VELOCITY_SPEED_1000)
3076                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3077
3078advertise_done:
3079        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3080
3081        {
3082                u8 GCR;
3083                GCR = readb(&regs->CHIPGCR);
3084                GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; /* MII mode, full duplex */
3085                writeb(GCR, &regs->CHIPGCR);
3086        }
3087
3088mac_done:
3089        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3090        /* Turn on SWPTAG just before entering power mode */
3091        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3092        /* Go to bed ..... */
3093        BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3094
3095        return 0;
3096}
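/*
 * Usage sketch (illustrative): velocity_set_wol() consumes the wol_opts
 * set up by velocity_ethtool_set_wol() below, so arming ARP wake on top
 * of the always-enabled magic packet wake amounts to:
 *
 *      vptr->wol_opts |= VELOCITY_WOL_ARP;
 *      vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
 *      // ...the suspend path then calls velocity_set_wol(vptr)
 *
 * The hardware recomputes the masked CRC over incoming frames and wakes
 * the system when it matches the value written to PatternCRC[0].
 */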
3097
3098/**
3099 *      velocity_save_context   -       save registers
3100 *      @vptr: velocity
3101 *      @context: buffer for stored context
3102 *
3103 *      Retrieve the current configuration from the velocity hardware
3104 *      and stash it in the context structure, for use by the context
3105 *      restore functions. This allows us to save things we need across
3106 *      power down states
3107 */
3108static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3109{
3110        struct mac_regs __iomem *regs = vptr->mac_regs;
3111        u16 i;
3112        u8 __iomem *ptr = (u8 __iomem *)regs;
3113
3114        for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3115                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3116
3117        for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3118                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3119
3120        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3121                *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3122
3123}
3124
3125static int velocity_suspend(struct device *dev)
3126{
3127        struct net_device *netdev = dev_get_drvdata(dev);
3128        struct velocity_info *vptr = netdev_priv(netdev);
3129        unsigned long flags;
3130
3131        if (!netif_running(vptr->netdev))
3132                return 0;
3133
3134        netif_device_detach(vptr->netdev);
3135
3136        spin_lock_irqsave(&vptr->lock, flags);
3137        if (vptr->pdev)
3138                pci_save_state(vptr->pdev);
3139
3140        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3141                velocity_get_ip(vptr);
3142                velocity_save_context(vptr, &vptr->context);
3143                velocity_shutdown(vptr);
3144                velocity_set_wol(vptr);
3145                if (vptr->pdev)
3146                        pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3147                velocity_set_power_state(vptr, PCI_D3hot);
3148        } else {
3149                velocity_save_context(vptr, &vptr->context);
3150                velocity_shutdown(vptr);
3151                if (vptr->pdev)
3152                        pci_disable_device(vptr->pdev);
3153                velocity_set_power_state(vptr, PCI_D3hot);
3154        }
3155
3156        spin_unlock_irqrestore(&vptr->lock, flags);
3157        return 0;
3158}
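/*
 * Note the two suspend paths above: with WOL enabled the chip is armed
 * and left wake-capable in D3hot; without it the device is simply shut
 * down and (on PCI) disabled before the power state change.
 */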
3159
3160/**
3161 *      velocity_restore_context        -       restore registers
3162 *      @vptr: velocity
3163 *      @context: buffer holding the previously saved context
3164 *
3165 *      Reload the register configuration from the velocity context
3166 *      created by velocity_save_context.
3167 */
3168static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3169{
3170        struct mac_regs __iomem *regs = vptr->mac_regs;
3171        int i;
3172        u8 __iomem *ptr = (u8 __iomem *)regs;
3173
3174        for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3175                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3176
3177        /* Just skip cr0 */
3178        for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3179                /* Clear */
3180                writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3181                /* Set */
3182                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3183        }
3184
3185        for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3186                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3187
3188        for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3189                writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3190
3191        for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3192                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3193}
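/*
 * The CR0..CR3 range above uses the chip's paired SET/CLR registers:
 * a byte written to the SET address turns bits on, and a byte written
 * to the CLR address (4 bytes later) turns bits off. Restoring a saved
 * value v is therefore (sketch, not driver code):
 *
 *      writeb(~v, ptr + reg + 4);      // clear every bit that is 0 in v
 *      writeb(v, ptr + reg);           // set every bit that is 1 in v
 *
 * which reproduces v exactly without a read-modify-write cycle.
 */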
3194
3195static int velocity_resume(struct device *dev)
3196{
3197        struct net_device *netdev = dev_get_drvdata(dev);
3198        struct velocity_info *vptr = netdev_priv(netdev);
3199        unsigned long flags;
3200        int i;
3201
3202        if (!netif_running(vptr->netdev))
3203                return 0;
3204
3205        velocity_set_power_state(vptr, PCI_D0);
3206
3207        if (vptr->pdev) {
3208                pci_enable_wake(vptr->pdev, PCI_D0, 0);
3209                pci_restore_state(vptr->pdev);
3210        }
3211
3212        mac_wol_reset(vptr->mac_regs);
3213
3214        spin_lock_irqsave(&vptr->lock, flags);
3215        velocity_restore_context(vptr, &vptr->context);
3216        velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3217        mac_disable_int(vptr->mac_regs);
3218
3219        velocity_tx_srv(vptr);
3220
3221        for (i = 0; i < vptr->tx.numq; i++) {
3222                if (vptr->tx.used[i])
3223                        mac_tx_queue_wake(vptr->mac_regs, i);
3224        }
3225
3226        mac_enable_int(vptr->mac_regs);
3227        spin_unlock_irqrestore(&vptr->lock, flags);
3228        netif_device_attach(vptr->netdev);
3229
3230        return 0;
3231}
3232#endif  /* CONFIG_PM_SLEEP */
3233
3234static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
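/*
 * For reference, SIMPLE_DEV_PM_OPS() above expands to roughly the
 * following (simplified sketch of the kernel macro):
 *
 *      static const struct dev_pm_ops velocity_pm_ops = {
 *              .suspend  = velocity_suspend,   .resume  = velocity_resume,
 *              .freeze   = velocity_suspend,   .thaw    = velocity_resume,
 *              .poweroff = velocity_suspend,   .restore = velocity_resume,
 *      };
 *
 * so the same pair of handlers serves both suspend-to-RAM and
 * hibernation, and the callbacks compile out when CONFIG_PM_SLEEP is
 * disabled (matching the #ifdef block that ends above).
 */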
3235
3236/*
3237 *      Definition for our device driver. The PCI layer interface
3238 *      uses this to handle all our card discovery and plugging.
3239 */
3240static struct pci_driver velocity_pci_driver = {
3241        .name           = VELOCITY_NAME,
3242        .id_table       = velocity_pci_id_table,
3243        .probe          = velocity_pci_probe,
3244        .remove         = velocity_pci_remove,
3245        .driver = {
3246                .pm = &velocity_pm_ops,
3247        },
3248};
3249
3250static struct platform_driver velocity_platform_driver = {
3251        .probe          = velocity_platform_probe,
3252        .remove         = velocity_platform_remove,
3253        .driver = {
3254                .name = "via-velocity",
3255                .of_match_table = velocity_of_ids,
3256                .pm = &velocity_pm_ops,
3257        },
3258};
3259
3260/**
3261 *      velocity_ethtool_up     -       pre hook for ethtool
3262 *      @dev: network device
3263 *
3264 *      Called before an ethtool operation. We need to make sure the
3265 *      chip is out of D3 state before we poke at it. In case of ethtool
3266 *      ops nesting, only wake the device up in the outermost block.
3267 */
3268static int velocity_ethtool_up(struct net_device *dev)
3269{
3270        struct velocity_info *vptr = netdev_priv(dev);
3271
3272        if (vptr->ethtool_ops_nesting == U32_MAX)
3273                return -EBUSY;
3274        if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3275                velocity_set_power_state(vptr, PCI_D0);
3276        return 0;
3277}
3278
3279/**
3280 *      velocity_ethtool_down   -       post hook for ethtool
3281 *      @dev: network device
3282 *
3283 *      Called after an ethtool operation. Restore the chip back to D3
3284 *      state if it isn't running. In case of ethtool ops nesting, only
3285 *      put the device to sleep in the outermost block.
3286 */
3287static void velocity_ethtool_down(struct net_device *dev)
3288{
3289        struct velocity_info *vptr = netdev_priv(dev);
3290
3291        if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3292                velocity_set_power_state(vptr, PCI_D3hot);
3293}
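/*
 * The ethtool core invokes .begin before and .complete after every
 * operation, so the nesting counter keeps the chip awake for the whole
 * outermost operation. Resulting call pattern (sketch):
 *
 *      velocity_ethtool_up(dev);       // nesting 0 -> 1: wake to D0
 *        velocity_ethtool_up(dev);     // nesting 1 -> 2: no power change
 *        velocity_ethtool_down(dev);   // nesting 2 -> 1: no power change
 *      velocity_ethtool_down(dev);     // nesting 1 -> 0: back to D3hot
 */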
3294
3295static int velocity_get_link_ksettings(struct net_device *dev,
3296                                       struct ethtool_link_ksettings *cmd)
3297{
3298        struct velocity_info *vptr = netdev_priv(dev);
3299        struct mac_regs __iomem *regs = vptr->mac_regs;
3300        u32 status;
3301        u32 supported, advertising;
3302
3303        status = check_connection_type(vptr->mac_regs);
3304
3305        supported = SUPPORTED_TP |
3306                        SUPPORTED_Autoneg |
3307                        SUPPORTED_10baseT_Half |
3308                        SUPPORTED_10baseT_Full |
3309                        SUPPORTED_100baseT_Half |
3310                        SUPPORTED_100baseT_Full |
3311                        SUPPORTED_1000baseT_Half |
3312                        SUPPORTED_1000baseT_Full;
3313
3314        advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3315        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3316                advertising |=
3317                        ADVERTISED_10baseT_Half |
3318                        ADVERTISED_10baseT_Full |
3319                        ADVERTISED_100baseT_Half |
3320                        ADVERTISED_100baseT_Full |
3321                        ADVERTISED_1000baseT_Half |
3322                        ADVERTISED_1000baseT_Full;
3323        } else {
3324                switch (vptr->options.spd_dpx) {
3325                case SPD_DPX_1000_FULL:
3326                        advertising |= ADVERTISED_1000baseT_Full;
3327                        break;
3328                case SPD_DPX_100_HALF:
3329                        advertising |= ADVERTISED_100baseT_Half;
3330                        break;
3331                case SPD_DPX_100_FULL:
3332                        advertising |= ADVERTISED_100baseT_Full;
3333                        break;
3334                case SPD_DPX_10_HALF:
3335                        advertising |= ADVERTISED_10baseT_Half;
3336                        break;
3337                case SPD_DPX_10_FULL:
3338                        advertising |= ADVERTISED_10baseT_Full;
3339                        break;
3340                default:
3341                        break;
3342                }
3343        }
3344
3345        if (status & VELOCITY_SPEED_1000)
3346                cmd->base.speed = SPEED_1000;
3347        else if (status & VELOCITY_SPEED_100)
3348                cmd->base.speed = SPEED_100;
3349        else
3350                cmd->base.speed = SPEED_10;
3351
3352        cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3353                AUTONEG_ENABLE : AUTONEG_DISABLE;
3354        cmd->base.port = PORT_TP;
3355        cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3356
3357        if (status & VELOCITY_DUPLEX_FULL)
3358                cmd->base.duplex = DUPLEX_FULL;
3359        else
3360                cmd->base.duplex = DUPLEX_HALF;
3361
3362        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3363                                                supported);
3364        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3365                                                advertising);
3366
3367        return 0;
3368}
3369
3370static int velocity_set_link_ksettings(struct net_device *dev,
3371                                       const struct ethtool_link_ksettings *cmd)
3372{
3373        struct velocity_info *vptr = netdev_priv(dev);
3374        u32 speed = cmd->base.speed;
3375        u32 curr_status;
3376        u32 new_status = 0;
3377        int ret = 0;
3378
3379        curr_status = check_connection_type(vptr->mac_regs);
3380        curr_status &= (~VELOCITY_LINK_FAIL);
3381
3382        new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3383        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3384        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3385        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3386        new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3387                       VELOCITY_DUPLEX_FULL : 0);
3388
3389        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3390            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3391                ret = -EINVAL;
3392        } else {
3393                enum speed_opt spd_dpx;
3394
3395                if (new_status & VELOCITY_AUTONEG_ENABLE)
3396                        spd_dpx = SPD_DPX_AUTO;
3397                else if ((new_status & VELOCITY_SPEED_1000) &&
3398                         (new_status & VELOCITY_DUPLEX_FULL)) {
3399                        spd_dpx = SPD_DPX_1000_FULL;
3400                } else if (new_status & VELOCITY_SPEED_100)
3401                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3402                                SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3403                else if (new_status & VELOCITY_SPEED_10)
3404                        spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3405                                SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3406                else
3407                        return -EOPNOTSUPP;
3408
3409                vptr->options.spd_dpx = spd_dpx;
3410
3411                velocity_set_media_mode(vptr, new_status);
3412        }
3413
3414        return ret;
3415}
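/*
 * Usage sketch: a forced 100Mbit/full configuration arrives here with
 * autoneg == AUTONEG_DISABLE, speed == SPEED_100 and duplex ==
 * DUPLEX_FULL, which the logic above folds into SPD_DPX_100_FULL before
 * calling velocity_set_media_mode(). From userspace that corresponds to
 * something like:
 *
 *      ethtool -s <iface> speed 100 duplex full autoneg off
 */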
3416
3417static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3418{
3419        struct velocity_info *vptr = netdev_priv(dev);
3420
3421        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3422        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3423        if (vptr->pdev)
3424                strlcpy(info->bus_info, pci_name(vptr->pdev),
3425                                                sizeof(info->bus_info));
3426        else
3427                strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3428}
3429
3430static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3431{
3432        struct velocity_info *vptr = netdev_priv(dev);
3433        wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3434        wol->wolopts |= WAKE_MAGIC;     /* magic packet wake is always enabled */
3435        /*
3436           if (vptr->wol_opts & VELOCITY_WOL_PHY)
3437                   wol.wolopts|=WAKE_PHY;
3438                         */
3439        if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3440                wol->wolopts |= WAKE_UCAST;
3441        if (vptr->wol_opts & VELOCITY_WOL_ARP)
3442                wol->wolopts |= WAKE_ARP;
3443        memcpy(&wol->sopass, vptr->wol_passwd, 6);
3444}
3445
3446static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3447{
3448        struct velocity_info *vptr = netdev_priv(dev);
3449
3450        if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3451                return -EFAULT;
3452        vptr->wol_opts = VELOCITY_WOL_MAGIC;    /* start from the magic packet default */
3453
3454        /*
3455           if (wol.wolopts & WAKE_PHY) {
3456           vptr->wol_opts|=VELOCITY_WOL_PHY;
3457           vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3458           }
3459         */
3460
3461        if (wol->wolopts & WAKE_MAGIC) {
3462                vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3463                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3464        }
3465        if (wol->wolopts & WAKE_UCAST) {
3466                vptr->wol_opts |= VELOCITY_WOL_UCAST;
3467                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3468        }
3469        if (wol->wolopts & WAKE_ARP) {
3470                vptr->wol_opts |= VELOCITY_WOL_ARP;
3471                vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3472        }
3473        memcpy(vptr->wol_passwd, wol->sopass, 6);
3474        return 0;
3475}
3476
3477static int get_pending_timer_val(int val)
3478{
3479        int mult_bits = val >> 6;
3480        int mult = 1;
3481
3482        switch (mult_bits) {
3484        case 1:
3485                mult = 4; break;
3486        case 2:
3487                mult = 16; break;
3488        case 3:
3489                mult = 64; break;
3490        case 0:
3491        default:
3492                break;
3493        }
3494
3495        return (val & 0x3f) * mult;
3496}
3497
3498static void set_pending_timer_val(int *val, u32 us)
3499{
3500        u8 mult = 0;
3501        u8 shift = 0;
3502
3503        if (us >= 0x3f) {
3504                mult = 1; /* multiply by 4 */
3505                shift = 2;
3506        }
3507        if (us >= 0x3f * 4) {
3508                mult = 2; /* multiply by 16 */
3509                shift = 4;
3510        }
3511        if (us >= 0x3f * 16) {
3512                mult = 3; /* multiply by 64 */
3513                shift = 6;
3514        }
3515
3516        *val = (mult << 6) | ((us >> shift) & 0x3f);
3517}
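/*
 * Worked example of the encoding above: a request for 1000us falls in
 * the "multiply by 16" band (0x3f * 4 <= us < 0x3f * 16), so
 *
 *      *val = (2 << 6) | ((1000 >> 4) & 0x3f) = 0x80 | 62 = 0xbe
 *
 * and get_pending_timer_val(0xbe) returns 62 * 16 = 992us: requested
 * values are quantized to the selected multiplier, up to a maximum of
 * 0x3f * 64 = 4032us.
 */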
3518
3519
3520static int velocity_get_coalesce(struct net_device *dev,
3521                                 struct ethtool_coalesce *ecmd,
3522                                 struct kernel_ethtool_coalesce *kernel_coal,
3523                                 struct netlink_ext_ack *extack)
3524{
3525        struct velocity_info *vptr = netdev_priv(dev);
3526
3527        ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3528        ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3529
3530        ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3531        ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3532
3533        return 0;
3534}
3535
3536static int velocity_set_coalesce(struct net_device *dev,
3537                                 struct ethtool_coalesce *ecmd,
3538                                 struct kernel_ethtool_coalesce *kernel_coal,
3539                                 struct netlink_ext_ack *extack)
3540{
3541        struct velocity_info *vptr = netdev_priv(dev);
3542        int max_us = 0x3f * 64;
3543        unsigned long flags;
3544
3545        /* 6 bits of timer value, scaled by a multiplier of up to 64 */
3546        if (ecmd->tx_coalesce_usecs > max_us)
3547                return -EINVAL;
3548        if (ecmd->rx_coalesce_usecs > max_us)
3549                return -EINVAL;
3550
3551        if (ecmd->tx_max_coalesced_frames > 0xff)
3552                return -EINVAL;
3553        if (ecmd->rx_max_coalesced_frames > 0xff)
3554                return -EINVAL;
3555
3556        vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3557        vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3558
3559        set_pending_timer_val(&vptr->options.rxqueue_timer,
3560                        ecmd->rx_coalesce_usecs);
3561        set_pending_timer_val(&vptr->options.txqueue_timer,
3562                        ecmd->tx_coalesce_usecs);
3563
3564        /* Setup the interrupt suppression and queue timers */
3565        spin_lock_irqsave(&vptr->lock, flags);
3566        mac_disable_int(vptr->mac_regs);
3567        setup_adaptive_interrupts(vptr);
3568        setup_queue_timers(vptr);
3569
3570        mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3571        mac_clear_isr(vptr->mac_regs);
3572        mac_enable_int(vptr->mac_regs);
3573        spin_unlock_irqrestore(&vptr->lock, flags);
3574
3575        return 0;
3576}
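/*
 * Usage sketch: these two handlers back the standard interrupt
 * coalescing controls, e.g.
 *
 *      ethtool -C <iface> rx-usecs 992 rx-frames 16
 *
 * Requests above 4032us or 255 frames are rejected with -EINVAL by the
 * bounds checks above.
 */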
3577
3578static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3579        "rx_all",
3580        "rx_ok",
3581        "tx_ok",
3582        "rx_error",
3583        "rx_runt_ok",
3584        "rx_runt_err",
3585        "rx_64",
3586        "tx_64",
3587        "rx_65_to_127",
3588        "tx_65_to_127",
3589        "rx_128_to_255",
3590        "tx_128_to_255",
3591        "rx_256_to_511",
3592        "tx_256_to_511",
3593        "rx_512_to_1023",
3594        "tx_512_to_1023",
3595        "rx_1024_to_1518",
3596        "tx_1024_to_1518",
3597        "tx_ether_collisions",
3598        "rx_crc_errors",
3599        "rx_jumbo",
3600        "tx_jumbo",
3601        "rx_mac_control_frames",
3602        "tx_mac_control_frames",
3603        "rx_frame_alignment_errors",
3604        "rx_long_ok",
3605        "rx_long_err",
3606        "tx_sqe_errors",
3607        "rx_no_buf",
3608        "rx_symbol_errors",
3609        "in_range_length_errors",
3610        "late_collisions"
3611};
3612
3613static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3614{
3615        switch (sset) {
3616        case ETH_SS_STATS:
3617                memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3618                break;
3619        }
3620}
3621
3622static int velocity_get_sset_count(struct net_device *dev, int sset)
3623{
3624        switch (sset) {
3625        case ETH_SS_STATS:
3626                return ARRAY_SIZE(velocity_gstrings);
3627        default:
3628                return -EOPNOTSUPP;
3629        }
3630}
3631
3632static void velocity_get_ethtool_stats(struct net_device *dev,
3633                                       struct ethtool_stats *stats, u64 *data)
3634{
3635        if (netif_running(dev)) {
3636                struct velocity_info *vptr = netdev_priv(dev);
3637                u32 *p = vptr->mib_counter;
3638                int i;
3639
3640                spin_lock_irq(&vptr->lock);
3641                velocity_update_hw_mibs(vptr);
3642                spin_unlock_irq(&vptr->lock);
3643
3644                for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3645                        *data++ = *p++;
3646        }
3647}
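/*
 * Usage sketch: `ethtool -S <iface>` reports the strings above paired
 * one-to-one with vptr->mib_counter[]; note that the handler only
 * refreshes the hardware MIB counters while the interface is running.
 */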
3648
3649static const struct ethtool_ops velocity_ethtool_ops = {
3650        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3651                                     ETHTOOL_COALESCE_MAX_FRAMES,
3652        .get_drvinfo            = velocity_get_drvinfo,
3653        .get_wol                = velocity_ethtool_get_wol,
3654        .set_wol                = velocity_ethtool_set_wol,
3655        .get_link               = velocity_get_link,
3656        .get_strings            = velocity_get_strings,
3657        .get_sset_count         = velocity_get_sset_count,
3658        .get_ethtool_stats      = velocity_get_ethtool_stats,
3659        .get_coalesce           = velocity_get_coalesce,
3660        .set_coalesce           = velocity_set_coalesce,
3661        .begin                  = velocity_ethtool_up,
3662        .complete               = velocity_ethtool_down,
3663        .get_link_ksettings     = velocity_get_link_ksettings,
3664        .set_link_ksettings     = velocity_set_link_ksettings,
3665};
3666
3667#if defined(CONFIG_PM) && defined(CONFIG_INET)
3668static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3669{
3670        struct in_ifaddr *ifa = ptr;
3671        struct net_device *dev = ifa->ifa_dev->dev;
3672
3673        if (dev_net(dev) == &init_net &&
3674            dev->netdev_ops == &velocity_netdev_ops)
3675                velocity_get_ip(netdev_priv(dev));
3676
3677        return NOTIFY_DONE;
3678}
3679
3680static struct notifier_block velocity_inetaddr_notifier = {
3681        .notifier_call  = velocity_netdev_event,
3682};
3683
3684static void velocity_register_notifier(void)
3685{
3686        register_inetaddr_notifier(&velocity_inetaddr_notifier);
3687}
3688
3689static void velocity_unregister_notifier(void)
3690{
3691        unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3692}
3693
3694#else
3695
3696#define velocity_register_notifier()    do {} while (0)
3697#define velocity_unregister_notifier()  do {} while (0)
3698
3699#endif  /* defined(CONFIG_PM) && defined(CONFIG_INET) */
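/*
 * Background sketch: the inetaddr notifier fires whenever an IPv4
 * address is added or removed (e.g. via `ip addr add`), so
 * velocity_get_ip() keeps vptr->ip_addr current for the ARP wake
 * pattern programmed by velocity_set_wol(). Without CONFIG_PM there is
 * no suspend path to arm, and without CONFIG_INET no IPv4 address to
 * track, hence the empty stubs.
 */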
3700
3701/**
3702 *      velocity_init_module    -       load time function
3703 *
3704 *      Called when the velocity module is loaded. The PCI and platform
3705 *      drivers are registered with their respective buses; each in turn
3706 *      calls the probe function for every velocity adapter installed
3707 *      in the system.
3708 */
3709static int __init velocity_init_module(void)
3710{
3711        int ret_pci, ret_platform;
3712
3713        velocity_register_notifier();
3714
3715        ret_pci = pci_register_driver(&velocity_pci_driver);
3716        ret_platform = platform_driver_register(&velocity_platform_driver);
3717
3718        /* if both registrations failed, remove the notifier */
3719        if ((ret_pci < 0) && (ret_platform < 0)) {
3720                velocity_unregister_notifier();
3721                return ret_pci;
3722        }
3723
3724        return 0;
3725}
3726
3727/**
3728 *      velocity_cleanup_module         -       module unload
3729 *
3730 *      Called when the velocity module is unloaded. It removes the
3731 *      notifiers and unregisters the PCI and platform driver
3732 *      interfaces, which in turn cleans up all discovered interfaces
3733 *      before returning.
3734 */
3735static void __exit velocity_cleanup_module(void)
3736{
3737        velocity_unregister_notifier();
3738
3739        pci_unregister_driver(&velocity_pci_driver);
3740        platform_driver_unregister(&velocity_platform_driver);
3741}
3742
3743module_init(velocity_init_module);
3744module_exit(velocity_cleanup_module);
3745