linux/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
<<
>>
Prefs
   1/*****************************************************************************
   2 *                                                                           *
   3 * File: cxgb2.c                                                             *
   4 * $Revision: 1.25 $                                                         *
   5 * $Date: 2005/06/22 00:43:25 $                                              *
   6 * Description:                                                              *
   7 *  Chelsio 10Gb Ethernet Driver.                                            *
   8 *                                                                           *
   9 * This program is free software; you can redistribute it and/or modify      *
  10 * it under the terms of the GNU General Public License, version 2, as       *
  11 * published by the Free Software Foundation.                                *
  12 *                                                                           *
  13 * You should have received a copy of the GNU General Public License along   *
  14 * with this program; if not, write to the Free Software Foundation, Inc.,   *
  15 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
  16 *                                                                           *
  17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
  19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
  20 *                                                                           *
  21 * http://www.chelsio.com                                                    *
  22 *                                                                           *
  23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
  24 * All rights reserved.                                                      *
  25 *                                                                           *
  26 * Maintainers: maintainers@chelsio.com                                      *
  27 *                                                                           *
  28 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
  29 *          Tina Yang               <tainay@chelsio.com>                     *
  30 *          Felix Marti             <felix@chelsio.com>                      *
  31 *          Scott Bardone           <sbardone@chelsio.com>                   *
  32 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
  33 *          Frank DiMambro          <frank@chelsio.com>                      *
  34 *                                                                           *
  35 * History:                                                                  *
  36 *                                                                           *
  37 ****************************************************************************/
  38
  39#include "common.h"
  40#include <linux/module.h>
  41#include <linux/init.h>
  42#include <linux/pci.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/if_vlan.h>
  46#include <linux/mii.h>
  47#include <linux/sockios.h>
  48#include <linux/dma-mapping.h>
  49#include <asm/uaccess.h>
  50
  51#include "cpl5_cmd.h"
  52#include "regs.h"
  53#include "gmac.h"
  54#include "cphy.h"
  55#include "sge.h"
  56#include "tp.h"
  57#include "espi.h"
  58#include "elmer0.h"
  59
  60#include <linux/workqueue.h>
  61
/*
 * Re-arm the MAC statistics accumulation task to run again in "secs"
 * seconds (delayed work on the shared kernel workqueue).
 */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}
  66
/*
 * Cancel a pending MAC statistics update, if any.  Note that
 * cancel_delayed_work() does not wait for a running handler to finish;
 * callers that need that guarantee synchronize separately (see cxgb_close()).
 */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
  71
/* Hard limits for the SGE queue sizes configurable through ethtool. */
#define MAX_CMDQ_ENTRIES        16384
#define MAX_CMDQ1_ENTRIES       1024
#define MAX_RX_BUFFERS          16384
#define MAX_RX_JUMBO_BUFFERS    16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES          32

/* Default netif message-level bitmap applied to each port at probe time. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock selectors passed as "mode" to t1_clock(). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;     /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Non-zero forces legacy (INTx) interrupts even when MSI is available. */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* PCI bus clock labels; NOTE(review): presumably MHz, indexed by a 2-bit
 * speed field — confirm at the (not visible here) use site. */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
 117
 118/*
 119 * Setup MAC to receive the types of packets we want.
 120 */
 121static void t1_set_rxmode(struct net_device *dev)
 122{
 123        struct adapter *adapter = dev->ml_priv;
 124        struct cmac *mac = adapter->port[dev->if_port].mac;
 125        struct t1_rx_mode rm;
 126
 127        rm.dev = dev;
 128        mac->ops->set_rx_mode(mac, &rm);
 129}
 130
 131static void link_report(struct port_info *p)
 132{
 133        if (!netif_carrier_ok(p->dev))
 134                printk(KERN_INFO "%s: link down\n", p->dev->name);
 135        else {
 136                const char *s = "10Mbps";
 137
 138                switch (p->link_config.speed) {
 139                        case SPEED_10000: s = "10Gbps"; break;
 140                        case SPEED_1000:  s = "1000Mbps"; break;
 141                        case SPEED_100:   s = "100Mbps"; break;
 142                }
 143
 144                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 145                       p->dev->name, s,
 146                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 147        }
 148}
 149
 150void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
 151                        int speed, int duplex, int pause)
 152{
 153        struct port_info *p = &adapter->port[port_id];
 154
 155        if (link_stat != netif_carrier_ok(p->dev)) {
 156                if (link_stat)
 157                        netif_carrier_on(p->dev);
 158                else
 159                        netif_carrier_off(p->dev);
 160                link_report(p);
 161
 162                /* multi-ports: inform toe */
 163                if ((speed > 0) && (adapter->params.nports > 1)) {
 164                        unsigned int sched_speed = 10;
 165                        switch (speed) {
 166                        case SPEED_1000:
 167                                sched_speed = 1000;
 168                                break;
 169                        case SPEED_100:
 170                                sched_speed = 100;
 171                                break;
 172                        case SPEED_10:
 173                                sched_speed = 10;
 174                                break;
 175                        }
 176                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
 177                }
 178        }
 179}
 180
/*
 * Bring a port's link up: reset the MAC, program its station address,
 * apply the current RX mode, start PHY autonegotiation/link, and finally
 * enable the MAC in both directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	/* Not all MACs support changing the address; skip when absent. */
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
 192
/*
 * Enable hardware checksum offload in the TP block.  IP header checksum
 * offload is only needed when TSO is advertised (port 0's features stand
 * in for all ports); TCP checksum offload is always enabled.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
 199
 200/*
 201 * Things to do upon first use of a card.
 202 * This must run with the rtnl lock held.
 203 */
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 *
 * Returns 0 on success or a negative errno.  The order matters:
 * hardware modules are initialized once (FULL_INIT_DONE), stale
 * interrupts are cleared before the IRQ is requested, and the SGE is
 * started before interrupts are enabled.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware bring-up, performed on the first open only. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI; fall back to a shared legacy interrupt line. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
 235
 236/*
 237 * Release resources when all the ports have been stopped.
 238 */
 239static void cxgb_down(struct adapter *adapter)
 240{
 241        t1_sge_stop(adapter->sge);
 242        t1_interrupts_disable(adapter);
 243        free_irq(adapter->pdev->irq, adapter);
 244        if (adapter->params.has_msi)
 245                pci_disable_msi(adapter->pdev);
 246}
 247
 248static int cxgb_open(struct net_device *dev)
 249{
 250        int err;
 251        struct adapter *adapter = dev->ml_priv;
 252        int other_ports = adapter->open_device_map & PORT_MASK;
 253
 254        napi_enable(&adapter->napi);
 255        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
 256                napi_disable(&adapter->napi);
 257                return err;
 258        }
 259
 260        __set_bit(dev->if_port, &adapter->open_device_map);
 261        link_start(&adapter->port[dev->if_port]);
 262        netif_start_queue(dev);
 263        if (!other_ports && adapter->params.stats_update_period)
 264                schedule_mac_stats_update(adapter,
 265                                          adapter->params.stats_update_period);
 266
 267        t1_vlan_mode(adapter, dev->features);
 268        return 0;
 269}
 270
/*
 * ndo_stop: quiesce one port.  The last port to close also cancels the
 * statistics task and tears down the adapter (cxgb_down).  Always returns 0.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/*
		 * Empty lock/unlock pair: wait out any update task currently
		 * holding work_lock so it observes the cleared port bit
		 * before we cancel (the cancel does not wait itself).
		 */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
 296
/*
 * ndo_get_stats: translate the MAC's hardware counters into the generic
 * net_device_stats structure cached in the port_info.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	/* No hardware counters for the remaining detailed RX categories. */
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
 344
 345static u32 get_msglevel(struct net_device *dev)
 346{
 347        struct adapter *adapter = dev->ml_priv;
 348
 349        return adapter->msg_enable;
 350}
 351
 352static void set_msglevel(struct net_device *dev, u32 val)
 353{
 354        struct adapter *adapter = dev->ml_priv;
 355
 356        adapter->msg_enable = val;
 357}
 358
/*
 * Names for the ethtool statistics, reported by get_strings().
 * The order here MUST match the order values are emitted in get_stats().
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	/* MAC TX counters */
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	/* MAC RX counters */
	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	/* ESPI stats (only filled in when adapter->espi is present) */
	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
 425
/* Size in bytes of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: report the length of the register dump. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
 432
 433static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 434{
 435        struct adapter *adapter = dev->ml_priv;
 436
 437        strcpy(info->driver, DRV_NAME);
 438        strcpy(info->version, DRV_VERSION);
 439        strcpy(info->fw_version, "N/A");
 440        strcpy(info->bus_info, pci_name(adapter->pdev));
 441}
 442
 443static int get_sset_count(struct net_device *dev, int sset)
 444{
 445        switch (sset) {
 446        case ETH_SS_STATS:
 447                return ARRAY_SIZE(stats_strings);
 448        default:
 449                return -EOPNOTSUPP;
 450        }
 451}
 452
 453static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 454{
 455        if (stringset == ETH_SS_STATS)
 456                memcpy(data, stats_strings, sizeof(stats_strings));
 457}
 458
/*
 * ethtool: fill in the statistics values.  The emission order below MUST
 * match stats_strings[] exactly: MAC TX, MAC RX, port, interrupt, and
 * finally the ESPI counters (only when the adapter has an ESPI block).
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	/* MAC TX counters */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	/* MAC RX counters */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	/* Per-port SGE stats */
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	/* Interrupt stats */
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
 540
 541static inline void reg_block_dump(struct adapter *ap, void *buf,
 542                                  unsigned int start, unsigned int end)
 543{
 544        u32 *p = buf + start;
 545
 546        for ( ; start <= end; start += sizeof(u32))
 547                *p++ = readl(ap->regs + start);
 548}
 549
/*
 * ethtool: dump the interesting register blocks into a zeroed
 * T2_REGMAP_SIZE buffer.  Registers outside the listed ranges read back
 * as 0.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
 572
/*
 * ethtool (legacy API): report the port's link settings.  Speed and duplex
 * are only meaningful while the carrier is up; otherwise both are reported
 * as -1 (unknown).  Always returns 0.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, p->link_config.speed);
		cmd->duplex = p->link_config.duplex;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
 597
 598static int speed_duplex_to_caps(int speed, int duplex)
 599{
 600        int cap = 0;
 601
 602        switch (speed) {
 603        case SPEED_10:
 604                if (duplex == DUPLEX_FULL)
 605                        cap = SUPPORTED_10baseT_Full;
 606                else
 607                        cap = SUPPORTED_10baseT_Half;
 608                break;
 609        case SPEED_100:
 610                if (duplex == DUPLEX_FULL)
 611                        cap = SUPPORTED_100baseT_Full;
 612                else
 613                        cap = SUPPORTED_100baseT_Half;
 614                break;
 615        case SPEED_1000:
 616                if (duplex == DUPLEX_FULL)
 617                        cap = SUPPORTED_1000baseT_Full;
 618                else
 619                        cap = SUPPORTED_1000baseT_Half;
 620                break;
 621        case SPEED_10000:
 622                if (duplex == DUPLEX_FULL)
 623                        cap = SUPPORTED_10000baseT_Full;
 624        }
 625        return cap;
 626}
 627
/* All speed/duplex modes this driver can possibly advertise. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

/*
 * ethtool (legacy API): change speed/duplex/autoneg.  Requires the PHY to
 * support autonegotiation at all; forced 1 Gb/s is rejected (NOTE(review):
 * presumably because 1000BASE-T mandates autoneg — confirm against the PHY
 * layer).  Applies immediately when the interface is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* x & (x - 1) != 0 <=> more than one mode requested:
		 * fall back to advertising everything we support. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
 667
 668static void get_pauseparam(struct net_device *dev,
 669                           struct ethtool_pauseparam *epause)
 670{
 671        struct adapter *adapter = dev->ml_priv;
 672        struct port_info *p = &adapter->port[dev->if_port];
 673
 674        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
 675        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
 676        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
 677}
 678
/*
 * ethtool: change the port's flow-control configuration.  Pause
 * autonegotiation can only be requested when the PHY supports autoneg.
 * With autoneg enabled the new setting is applied by restarting link
 * negotiation; otherwise it is forced directly into the MAC.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* -1, -1: leave speed and duplex unchanged, set only fc. */
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
 708
 709static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 710{
 711        struct adapter *adapter = dev->ml_priv;
 712        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 713
 714        e->rx_max_pending = MAX_RX_BUFFERS;
 715        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
 716        e->tx_max_pending = MAX_CMDQ_ENTRIES;
 717
 718        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
 719        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
 720        e->tx_pending = adapter->params.sge.cmdQ_size[0];
 721}
 722
/*
 * ethtool: change the SGE ring sizes.  Validates all requested sizes
 * against the driver limits (no mini ring is supported, the TX ring must
 * hold at least one max-fragmented packet per port plus one) and refuses
 * once the hardware has been brought up, since the rings are already
 * allocated then.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* Command queue 1 has its own, smaller cap. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
 746
 747static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 748{
 749        struct adapter *adapter = dev->ml_priv;
 750
 751        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 752        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 753        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 754        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 755        return 0;
 756}
 757
 758static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 759{
 760        struct adapter *adapter = dev->ml_priv;
 761
 762        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
 763        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
 764        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
 765        return 0;
 766}
 767
 768static int get_eeprom_len(struct net_device *dev)
 769{
 770        struct adapter *adapter = dev->ml_priv;
 771
 772        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 773}
 774
/* Magic identifying this EEPROM layout: Chelsio vendor ID + chip version. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool: read a range of the serial EEPROM.  Reads are done in aligned
 * 32-bit words into a stack buffer, then the exact requested byte range is
 * copied out.  NOTE(review): no bounds check against EEPROM_SIZE here —
 * presumably the ethtool core clamps offset+len to get_eeprom_len();
 * confirm before reuse.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Start at the word-aligned offset at or below e->offset. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
 791
/* ethtool entry points for T1/T2 adapters. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
};
 813
 814static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 815{
 816        struct adapter *adapter = dev->ml_priv;
 817        struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
 818
 819        return mdio_mii_ioctl(mdio, if_mii(req), cmd);
 820}
 821
 822static int t1_change_mtu(struct net_device *dev, int new_mtu)
 823{
 824        int ret;
 825        struct adapter *adapter = dev->ml_priv;
 826        struct cmac *mac = adapter->port[dev->if_port].mac;
 827
 828        if (!mac->ops->set_mtu)
 829                return -EOPNOTSUPP;
 830        if (new_mtu < 68)
 831                return -EINVAL;
 832        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 833                return ret;
 834        dev->mtu = new_mtu;
 835        return 0;
 836}
 837
 838static int t1_set_mac_addr(struct net_device *dev, void *p)
 839{
 840        struct adapter *adapter = dev->ml_priv;
 841        struct cmac *mac = adapter->port[dev->if_port].mac;
 842        struct sockaddr *addr = p;
 843
 844        if (!mac->ops->macaddress_set)
 845                return -EOPNOTSUPP;
 846
 847        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 848        mac->ops->macaddress_set(mac, dev->dev_addr);
 849        return 0;
 850}
 851
 852static u32 t1_fix_features(struct net_device *dev, u32 features)
 853{
 854        /*
 855         * Since there is no support for separate rx/tx vlan accel
 856         * enable/disable make sure tx flag is always in same state as rx.
 857         */
 858        if (features & NETIF_F_HW_VLAN_RX)
 859                features |= NETIF_F_HW_VLAN_TX;
 860        else
 861                features &= ~NETIF_F_HW_VLAN_TX;
 862
 863        return features;
 864}
 865
 866static int t1_set_features(struct net_device *dev, u32 features)
 867{
 868        u32 changed = dev->features ^ features;
 869        struct adapter *adapter = dev->ml_priv;
 870
 871        if (changed & NETIF_F_HW_VLAN_RX)
 872                t1_vlan_mode(adapter, features);
 873
 874        return 0;
 875}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: invoke the interrupt handler synchronously, with local
 * interrupts disabled, so the device can be serviced when normal
 * interrupt delivery cannot be relied upon.
 */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
 887
 888/*
 889 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 890 * does not have any other way to prevent stats counter overflow.
 891 */
 892static void mac_stats_task(struct work_struct *work)
 893{
 894        int i;
 895        struct adapter *adapter =
 896                container_of(work, struct adapter, stats_update_task.work);
 897
 898        for_each_port(adapter, i) {
 899                struct port_info *p = &adapter->port[i];
 900
 901                if (netif_running(p->dev))
 902                        p->mac->ops->statistics_update(p->mac,
 903                                                       MAC_STATS_UPDATE_FAST);
 904        }
 905
 906        /* Schedule the next statistics update if any port is active. */
 907        spin_lock(&adapter->work_lock);
 908        if (adapter->open_device_map & PORT_MASK)
 909                schedule_mac_stats_update(adapter,
 910                                          adapter->params.stats_update_period);
 911        spin_unlock(&adapter->work_lock);
 912}
 913
 914/*
 915 * Processes elmer0 external interrupts in process context.
 916 */
 917static void ext_intr_task(struct work_struct *work)
 918{
 919        struct adapter *adapter =
 920                container_of(work, struct adapter, ext_intr_handler_task);
 921
 922        t1_elmer0_ext_intr_handler(adapter);
 923
 924        /* Now reenable external interrupts */
 925        spin_lock_irq(&adapter->async_lock);
 926        adapter->slow_intr_mask |= F_PL_INTR_EXT;
 927        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
 928        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
 929                   adapter->regs + A_PL_ENABLE);
 930        spin_unlock_irq(&adapter->async_lock);
 931}
 932
 933/*
 934 * Interrupt-context handler for elmer0 external interrupts.
 935 */
 936void t1_elmer0_ext_intr(struct adapter *adapter)
 937{
 938        /*
 939         * Schedule a task to handle external interrupts as we require
 940         * a process context.  We disable EXT interrupts in the interim
 941         * and let the task reenable them when it's done.
 942         */
 943        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
 944        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
 945                   adapter->regs + A_PL_ENABLE);
 946        schedule_work(&adapter->ext_intr_handler_task);
 947}
 948
/*
 * Handle a fatal hardware error: quiesce the SGE and mask all interrupts
 * (only once full initialization has completed), then log the condition.
 * The adapter is left suspended; no recovery is attempted here.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
 958
/* Network-stack entry points installed on every port's net_device. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open               = cxgb_open,
	.ndo_stop               = cxgb_close,
	.ndo_start_xmit         = t1_start_xmit,
	.ndo_get_stats          = t1_get_stats,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = t1_set_rxmode,
	.ndo_do_ioctl           = t1_ioctl,
	.ndo_change_mtu         = t1_change_mtu,
	.ndo_set_mac_address    = t1_set_mac_addr,
	.ndo_fix_features       = t1_fix_features,
	.ndo_set_features       = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = t1_netpoll,
#endif
};
 975
/*
 * Probe one Chelsio T1 PCI function.
 *
 * The adapter structure is carved out of the FIRST port's net_device
 * private area (alloc_etherdev size trick below); the remaining ports
 * share it via netdev->ml_priv.  Registration failure of individual
 * ports is tolerated as long as at least one port registers.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	/* Print the driver banner only once, on the first probe. */
	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* The register file lives in BAR0, which must be a memory BAR. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA, falling back to a 32-bit mask if unavailable. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first netdev also carries the adapter struct. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time adapter-wide initialization, done on port 0. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port net_device setup and offload feature flags. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on xmit. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warning("%s: cannot register net device %s, skipping\n",
				   pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* err still holds the last register_netdev() failure code here. */
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	/* Labels fall through: each one undoes one more setup stage. */
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1179
/*
 * Clock @nbits bits of @bitdata, most significant bit first, into the
 * clock-synthesizer serial interface behind the ELMER0 GPO register:
 * for each bit the data line is set up, SCLOCK is driven low, then the
 * bit is latched on the rising SCLOCK edge.  Called with
 * adapter->tpi_lock held (see t1_clock()), hence the lockless
 * __t1_tpi_* accessors.
 */
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	/* GPO bit positions of the synthesizer serial clock and data lines. */
	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		/* Extract the next bit, MSB first. */
		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}
1217
/*
 * Re-clock a T1B between full speed (HCLOCK) and power-save speed
 * (LCLOCK) by serially programming the ASIC-core and memory clock
 * synthesizers through the ELMER0 GPO lines.
 *
 * @mode: bit 0 selects HCLOCK/LCLOCK; if bit 1 is set the call is a
 *        no-op that returns 0 (the original comment says "show current
 *        mode" — nothing is actually reported).
 *
 * Returns -ENODEV on non-T1B chips and -EALREADY if the ASIC is already
 * running in the requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	/* Synthesizer divider values/widths and GPO control-bit positions. */
	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Pick the M divider values for the requested speed. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1327
/*
 * Soft-reset the chip by bouncing its power-management control/status
 * register: write 3 then 0 — presumably D3hot followed by D0, given the
 * PM CSR encoding; confirm against the A_PCICFG_PM_CSR definition.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1333
/*
 * Driver detach: unregister every successfully registered port, tear
 * down the software modules, free resources and finally soft-reset the
 * chip.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/*
	 * The loop above leaves i at the port count; walk back down and
	 * free the netdevs in reverse.  port[0]'s netdev holds the
	 * adapter structure itself, so it is freed last.
	 */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}
1358
/* PCI driver glue: binds probe/remove to all boards listed in t1_pci_tbl. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1365
/* Module load: register the PCI driver (probe runs per matching device). */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}

/* Module unload: unregister the driver, detaching all bound devices. */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);
1378