linux/drivers/net/chelsio/cxgb2.c
<<
>>
Prefs
   1/*****************************************************************************
   2 *                                                                           *
   3 * File: cxgb2.c                                                             *
   4 * $Revision: 1.25 $                                                         *
   5 * $Date: 2005/06/22 00:43:25 $                                              *
   6 * Description:                                                              *
   7 *  Chelsio 10Gb Ethernet Driver.                                            *
   8 *                                                                           *
   9 * This program is free software; you can redistribute it and/or modify      *
  10 * it under the terms of the GNU General Public License, version 2, as       *
  11 * published by the Free Software Foundation.                                *
  12 *                                                                           *
  13 * You should have received a copy of the GNU General Public License along   *
  14 * with this program; if not, write to the Free Software Foundation, Inc.,   *
  15 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
  16 *                                                                           *
  17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
  19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
  20 *                                                                           *
  21 * http://www.chelsio.com                                                    *
  22 *                                                                           *
  23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
  24 * All rights reserved.                                                      *
  25 *                                                                           *
  26 * Maintainers: maintainers@chelsio.com                                      *
  27 *                                                                           *
  28 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
  29 *          Tina Yang               <tainay@chelsio.com>                     *
  30 *          Felix Marti             <felix@chelsio.com>                      *
  31 *          Scott Bardone           <sbardone@chelsio.com>                   *
  32 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
  33 *          Frank DiMambro          <frank@chelsio.com>                      *
  34 *                                                                           *
  35 * History:                                                                  *
  36 *                                                                           *
  37 ****************************************************************************/
  38
  39#include "common.h"
  40#include <linux/module.h>
  41#include <linux/init.h>
  42#include <linux/pci.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/if_vlan.h>
  46#include <linux/mii.h>
  47#include <linux/sockios.h>
  48#include <linux/dma-mapping.h>
  49#include <asm/uaccess.h>
  50
  51#include "cpl5_cmd.h"
  52#include "regs.h"
  53#include "gmac.h"
  54#include "cphy.h"
  55#include "sge.h"
  56#include "tp.h"
  57#include "espi.h"
  58#include "elmer0.h"
  59
  60#include <linux/workqueue.h>
  61
  62static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
  63{
  64        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
  65}
  66
/* Cancel a pending MAC statistics update, if any is queued. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
        cancel_delayed_work(&ap->stats_update_task);
}
  71
/* SGE command-queue and free-list ring size limits (in entries). */
#define MAX_CMDQ_ENTRIES        16384
#define MAX_CMDQ1_ENTRIES       1024
#define MAX_RX_BUFFERS          16384
#define MAX_RX_JUMBO_BUFFERS    16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES          32

/* Default netif message-enable bitmap: everything but interrupt chatter. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32
  90
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Default netif_msg verbosity; overridable via the module parameter below. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock mode selectors passed to t1_clock(). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;     /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Set to 1 to force legacy INTx interrupts even if MSI is available. */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* Printable PCI bus speeds (MHz); consumers are outside this chunk. */
static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};
 117
 118/*
 119 * Setup MAC to receive the types of packets we want.
 120 */
 121static void t1_set_rxmode(struct net_device *dev)
 122{
 123        struct adapter *adapter = dev->ml_priv;
 124        struct cmac *mac = adapter->port[dev->if_port].mac;
 125        struct t1_rx_mode rm;
 126
 127        rm.dev = dev;
 128        mac->ops->set_rx_mode(mac, &rm);
 129}
 130
 131static void link_report(struct port_info *p)
 132{
 133        if (!netif_carrier_ok(p->dev))
 134                printk(KERN_INFO "%s: link down\n", p->dev->name);
 135        else {
 136                const char *s = "10Mbps";
 137
 138                switch (p->link_config.speed) {
 139                        case SPEED_10000: s = "10Gbps"; break;
 140                        case SPEED_1000:  s = "1000Mbps"; break;
 141                        case SPEED_100:   s = "100Mbps"; break;
 142                }
 143
 144                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 145                       p->dev->name, s,
 146                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 147        }
 148}
 149
 150void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
 151                        int speed, int duplex, int pause)
 152{
 153        struct port_info *p = &adapter->port[port_id];
 154
 155        if (link_stat != netif_carrier_ok(p->dev)) {
 156                if (link_stat)
 157                        netif_carrier_on(p->dev);
 158                else
 159                        netif_carrier_off(p->dev);
 160                link_report(p);
 161
 162                /* multi-ports: inform toe */
 163                if ((speed > 0) && (adapter->params.nports > 1)) {
 164                        unsigned int sched_speed = 10;
 165                        switch (speed) {
 166                        case SPEED_1000:
 167                                sched_speed = 1000;
 168                                break;
 169                        case SPEED_100:
 170                                sched_speed = 100;
 171                                break;
 172                        case SPEED_10:
 173                                sched_speed = 10;
 174                                break;
 175                        }
 176                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
 177                }
 178        }
 179}
 180
/*
 * Bring one port's link up: reset the MAC, reprogram its station
 * address and RX filter, kick off PHY link negotiation, and finally
 * enable both traffic directions.  The MAC is reset first and enabled
 * last; keep that ordering.
 */
static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
 192
/*
 * Enable TP checksum offload.  TCP checksum offload is turned on
 * unconditionally; IP header checksum offload is only enabled when
 * port 0 advertises TSO (per the original "for TSO only" note).
 */
static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
 199
 200/*
 201 * Things to do upon first use of a card.
 202 * This must run with the rtnl lock held.
 203 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        /* One-time hardware initialization, done only on the first open. */
        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);

        /* Prefer MSI; fall back to shared legacy INTx if it can't be enabled. */
        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
        err = request_irq(adapter->pdev->irq, t1_interrupt,
                          adapter->params.has_msi ? 0 : IRQF_SHARED,
                          adapter->name, adapter);
        if (err) {
                /* Undo MSI enable if the IRQ could not be requested. */
                if (adapter->params.has_msi)
                        pci_disable_msi(adapter->pdev);

                goto out_err;
        }

        /* Start the SGE before unmasking interrupts. */
        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
out_err:
        return err;
}
 235
 236/*
 237 * Release resources when all the ports have been stopped.
 238 */
static void cxgb_down(struct adapter *adapter)
{
        /* Quiesce the SGE and mask interrupts before releasing the IRQ. */
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
        if (adapter->params.has_msi)
                pci_disable_msi(adapter->pdev);
}
 247
 248static int cxgb_open(struct net_device *dev)
 249{
 250        int err;
 251        struct adapter *adapter = dev->ml_priv;
 252        int other_ports = adapter->open_device_map & PORT_MASK;
 253
 254        napi_enable(&adapter->napi);
 255        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
 256                napi_disable(&adapter->napi);
 257                return err;
 258        }
 259
 260        __set_bit(dev->if_port, &adapter->open_device_map);
 261        link_start(&adapter->port[dev->if_port]);
 262        netif_start_queue(dev);
 263        if (!other_ports && adapter->params.stats_update_period)
 264                schedule_mac_stats_update(adapter,
 265                                          adapter->params.stats_update_period);
 266        return 0;
 267}
 268
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        /* Stop traffic in both directions before touching the port map. */
        netif_stop_queue(dev);
        napi_disable(&adapter->napi);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                smp_mb__after_clear_bit();
                /*
                 * Taking and releasing work_lock lets any in-flight stats
                 * task (which holds the same lock) finish before the
                 * delayed work is cancelled.
                 */
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        /* Last open port gone: tear down IRQ/SGE resources. */
        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}
 294
 295static struct net_device_stats *t1_get_stats(struct net_device *dev)
 296{
 297        struct adapter *adapter = dev->ml_priv;
 298        struct port_info *p = &adapter->port[dev->if_port];
 299        struct net_device_stats *ns = &p->netstats;
 300        const struct cmac_statistics *pstats;
 301
 302        /* Do a full update of the MAC stats */
 303        pstats = p->mac->ops->statistics_update(p->mac,
 304                                                MAC_STATS_UPDATE_FULL);
 305
 306        ns->tx_packets = pstats->TxUnicastFramesOK +
 307                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
 308
 309        ns->rx_packets = pstats->RxUnicastFramesOK +
 310                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
 311
 312        ns->tx_bytes = pstats->TxOctetsOK;
 313        ns->rx_bytes = pstats->RxOctetsOK;
 314
 315        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
 316                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
 317        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
 318                pstats->RxFCSErrors + pstats->RxAlignErrors +
 319                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
 320                pstats->RxSymbolErrors + pstats->RxRuntErrors;
 321
 322        ns->multicast  = pstats->RxMulticastFramesOK;
 323        ns->collisions = pstats->TxTotalCollisions;
 324
 325        /* detailed rx_errors */
 326        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
 327                pstats->RxJabberErrors;
 328        ns->rx_over_errors   = 0;
 329        ns->rx_crc_errors    = pstats->RxFCSErrors;
 330        ns->rx_frame_errors  = pstats->RxAlignErrors;
 331        ns->rx_fifo_errors   = 0;
 332        ns->rx_missed_errors = 0;
 333
 334        /* detailed tx_errors */
 335        ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
 336        ns->tx_carrier_errors   = 0;
 337        ns->tx_fifo_errors      = pstats->TxUnderrun;
 338        ns->tx_heartbeat_errors = 0;
 339        ns->tx_window_errors    = pstats->TxLateCollisions;
 340        return ns;
 341}
 342
 343static u32 get_msglevel(struct net_device *dev)
 344{
 345        struct adapter *adapter = dev->ml_priv;
 346
 347        return adapter->msg_enable;
 348}
 349
 350static void set_msglevel(struct net_device *dev, u32 val)
 351{
 352        struct adapter *adapter = dev->ml_priv;
 353
 354        adapter->msg_enable = val;
 355}
 356
/*
 * Names of the ethtool statistics.  The order here is a hard contract:
 * it must match the order in which get_stats() writes its values.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",
        "TxJumboFramesOk",
        "TxJumboOctetsOk",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",
        "RxJumboFramesOk",
        "RxJumboOctetsOk",

        /* Port stats */
        "RxCsumGood",
        "TxCsumOffload",
        "TxTso",
        "RxVlan",
        "TxVlan",
        "TxNeedHeadroom",

        /* Interrupt stats */
        "rx drops",
        "pure_rsps",
        "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",

        /* ESPI stats (only reported when adapter->espi is present) */
        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};
 423
/* Size in bytes of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: length of the register dump buffer. */
static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}
 430
 431static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 432{
 433        struct adapter *adapter = dev->ml_priv;
 434
 435        strcpy(info->driver, DRV_NAME);
 436        strcpy(info->version, DRV_VERSION);
 437        strcpy(info->fw_version, "N/A");
 438        strcpy(info->bus_info, pci_name(adapter->pdev));
 439}
 440
 441static int get_sset_count(struct net_device *dev, int sset)
 442{
 443        switch (sset) {
 444        case ETH_SS_STATS:
 445                return ARRAY_SIZE(stats_strings);
 446        default:
 447                return -EOPNOTSUPP;
 448        }
 449}
 450
 451static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 452{
 453        if (stringset == ETH_SS_STATS)
 454                memcpy(data, stats_strings, sizeof(stats_strings));
 455}
 456
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_intr_counts *t;
        struct sge_port_stats ss;

        /* Snapshot MAC, SGE interrupt, and per-port SGE counters. */
        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        t = t1_sge_get_intr_counts(adapter->sge);
        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

        /*
         * The order of the values written below must match the order of
         * the names in stats_strings[] above.
         */
        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;
        *data++ = s->TxJumboFramesOK;
        *data++ = s->TxJumboOctetsOK;

        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;
        *data++ = s->RxJumboFramesOK;
        *data++ = s->RxJumboOctetsOK;

        /* Per-port SGE stats. */
        *data++ = ss.rx_cso_good;
        *data++ = ss.tx_cso;
        *data++ = ss.tx_tso;
        *data++ = ss.vlan_xtract;
        *data++ = ss.vlan_insert;
        *data++ = ss.tx_need_hdrroom;

        /* SGE interrupt counters. */
        *data++ = t->rx_drops;
        *data++ = t->pure_rsps;
        *data++ = t->unhandled_irqs;
        *data++ = t->respQ_empty;
        *data++ = t->respQ_overflow;
        *data++ = t->freelistQ_empty;
        *data++ = t->pkt_too_big;
        *data++ = t->pkt_mismatch;
        *data++ = t->cmdQ_full[0];
        *data++ = t->cmdQ_full[1];

        /* ESPI counters, only on adapters that have an ESPI block. */
        if (adapter->espi) {
                const struct espi_intr_counts *e;

                e = t1_espi_get_intr_counts(adapter->espi);
                *data++ = e->DIP2_parity_err;
                *data++ = e->DIP4_err;
                *data++ = e->rx_drops;
                *data++ = e->tx_drops;
                *data++ = e->rx_ovflw;
                *data++ = e->parity_err;
        }
}
 538
 539static inline void reg_block_dump(struct adapter *ap, void *buf,
 540                                  unsigned int start, unsigned int end)
 541{
 542        u32 *p = buf + start;
 543
 544        for ( ; start <= end; start += sizeof(u32))
 545                *p++ = readl(ap->regs + start);
 546}
 547
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->ml_priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        /* Zero the map so gaps between the dumped ranges read back as 0. */
        memset(buf, 0, T2_REGMAP_SIZE);
        /* Dump each hardware module's register range at its native offset. */
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
 570
 571static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 572{
 573        struct adapter *adapter = dev->ml_priv;
 574        struct port_info *p = &adapter->port[dev->if_port];
 575
 576        cmd->supported = p->link_config.supported;
 577        cmd->advertising = p->link_config.advertising;
 578
 579        if (netif_carrier_ok(dev)) {
 580                ethtool_cmd_speed_set(cmd, p->link_config.speed);
 581                cmd->duplex = p->link_config.duplex;
 582        } else {
 583                ethtool_cmd_speed_set(cmd, -1);
 584                cmd->duplex = -1;
 585        }
 586
 587        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
 588        cmd->phy_address = p->phy->mdio.prtad;
 589        cmd->transceiver = XCVR_EXTERNAL;
 590        cmd->autoneg = p->link_config.autoneg;
 591        cmd->maxtxpkt = 0;
 592        cmd->maxrxpkt = 0;
 593        return 0;
 594}
 595
 596static int speed_duplex_to_caps(int speed, int duplex)
 597{
 598        int cap = 0;
 599
 600        switch (speed) {
 601        case SPEED_10:
 602                if (duplex == DUPLEX_FULL)
 603                        cap = SUPPORTED_10baseT_Full;
 604                else
 605                        cap = SUPPORTED_10baseT_Half;
 606                break;
 607        case SPEED_100:
 608                if (duplex == DUPLEX_FULL)
 609                        cap = SUPPORTED_100baseT_Full;
 610                else
 611                        cap = SUPPORTED_100baseT_Half;
 612                break;
 613        case SPEED_1000:
 614                if (duplex == DUPLEX_FULL)
 615                        cap = SUPPORTED_1000baseT_Full;
 616                else
 617                        cap = SUPPORTED_1000baseT_Half;
 618                break;
 619        case SPEED_10000:
 620                if (duplex == DUPLEX_FULL)
 621                        cap = SUPPORTED_10000baseT_Full;
 622        }
 623        return cap;
 624}
 625
/* Every link-mode advertising bit this driver understands. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)
 630
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;             /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                u32 speed = ethtool_cmd_speed(cmd);
                int cap = speed_duplex_to_caps(speed, cmd->duplex);

                /*
                 * Reject unsupported speed/duplex combinations.  Forcing
                 * 1Gbps is also rejected -- presumably 1G requires
                 * autonegotiation on this hardware (TODO confirm).
                 */
                if (!(lc->supported & cap) || (speed == SPEED_1000))
                        return -EINVAL;
                lc->requested_speed = speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                /*
                 * x & (x - 1) is non-zero iff more than one bit is set:
                 * if several modes were requested, fall back to
                 * advertising everything we support.
                 */
                if (cmd->advertising & (cmd->advertising - 1))
                        cmd->advertising = lc->supported;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        /* Apply immediately if the interface is up. */
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}
 665
 666static void get_pauseparam(struct net_device *dev,
 667                           struct ethtool_pauseparam *epause)
 668{
 669        struct adapter *adapter = dev->ml_priv;
 670        struct port_info *p = &adapter->port[dev->if_port];
 671
 672        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
 673        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
 674        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
 675}
 676
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        /* Pause autoneg is only possible if the link supports autoneg. */
        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                /* Renegotiate so the new pause settings take effect. */
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                /* No autoneg: program the MAC's flow control directly. */
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}
 706
 707static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 708{
 709        struct adapter *adapter = dev->ml_priv;
 710        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 711
 712        e->rx_max_pending = MAX_RX_BUFFERS;
 713        e->rx_mini_max_pending = 0;
 714        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
 715        e->tx_max_pending = MAX_CMDQ_ENTRIES;
 716
 717        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
 718        e->rx_mini_pending = 0;
 719        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
 720        e->tx_pending = adapter->params.sge.cmdQ_size[0];
 721}
 722
 723static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 724{
 725        struct adapter *adapter = dev->ml_priv;
 726        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 727
 728        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
 729            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
 730            e->tx_pending > MAX_CMDQ_ENTRIES ||
 731            e->rx_pending < MIN_FL_ENTRIES ||
 732            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
 733            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
 734                return -EINVAL;
 735
 736        if (adapter->flags & FULL_INIT_DONE)
 737                return -EBUSY;
 738
 739        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 740        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
 741        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
 742        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
 743                MAX_CMDQ1_ENTRIES : e->tx_pending;
 744        return 0;
 745}
 746
 747static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 748{
 749        struct adapter *adapter = dev->ml_priv;
 750
 751        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 752        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 753        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 754        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 755        return 0;
 756}
 757
 758static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 759{
 760        struct adapter *adapter = dev->ml_priv;
 761
 762        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
 763        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
 764        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
 765        return 0;
 766}
 767
 768static int get_eeprom_len(struct net_device *dev)
 769{
 770        struct adapter *adapter = dev->ml_priv;
 771
 772        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 773}
 774
/* ethtool EEPROM magic: Chelsio PCI vendor ID plus chip version in bits 16+. */
#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
 777
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->ml_priv;

        e->magic = EEPROM_MAGIC(adapter);
        /*
         * Read whole aligned 32-bit words covering [offset, offset+len).
         * NOTE(review): there is no explicit bounds check against
         * EEPROM_SIZE here -- presumably the ethtool core clamps
         * offset/len to get_eeprom_len() before calling in; confirm
         * before invoking this path directly.
         */
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}
 791
/* ethtool operations exported by this driver. */
static const struct ethtool_ops t1_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .get_sset_count    = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
};
 813
 814static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 815{
 816        struct adapter *adapter = dev->ml_priv;
 817        struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
 818
 819        return mdio_mii_ioctl(mdio, if_mii(req), cmd);
 820}
 821
 822static int t1_change_mtu(struct net_device *dev, int new_mtu)
 823{
 824        int ret;
 825        struct adapter *adapter = dev->ml_priv;
 826        struct cmac *mac = adapter->port[dev->if_port].mac;
 827
 828        if (!mac->ops->set_mtu)
 829                return -EOPNOTSUPP;
 830        if (new_mtu < 68)
 831                return -EINVAL;
 832        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 833                return ret;
 834        dev->mtu = new_mtu;
 835        return 0;
 836}
 837
/*
 * Set the interface MAC address and program it into the hardware MAC.
 *
 * NOTE(review): addr->sa_data is not validated (e.g. with
 * is_valid_ether_addr()) before being copied, so a zero or multicast
 * address would be accepted -- confirm whether that is intentional.
 */
static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}
 851
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * Attach or detach a VLAN group.  Hardware VLAN acceleration is
 * enabled while a group is registered and disabled when grp is NULL.
 * async_lock serializes the update against the slow interrupt path.
 */
static void t1_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *grp)
{
	struct adapter *adapter = dev->ml_priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
#endif
 864
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: invoke the interrupt handler by hand, with local
 * interrupts disabled, so netconsole and friends can make progress
 * when normal interrupt delivery is not available.
 */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
 876
 877/*
 878 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 879 * does not have any other way to prevent stats counter overflow.
 880 */
 881static void mac_stats_task(struct work_struct *work)
 882{
 883        int i;
 884        struct adapter *adapter =
 885                container_of(work, struct adapter, stats_update_task.work);
 886
 887        for_each_port(adapter, i) {
 888                struct port_info *p = &adapter->port[i];
 889
 890                if (netif_running(p->dev))
 891                        p->mac->ops->statistics_update(p->mac,
 892                                                       MAC_STATS_UPDATE_FAST);
 893        }
 894
 895        /* Schedule the next statistics update if any port is active. */
 896        spin_lock(&adapter->work_lock);
 897        if (adapter->open_device_map & PORT_MASK)
 898                schedule_mac_stats_update(adapter,
 899                                          adapter->params.stats_update_period);
 900        spin_unlock(&adapter->work_lock);
 901}
 902
/*
 * Processes elmer0 external interrupts in process context.
 *
 * Runs the external-interrupt handler, then re-enables the EXT source
 * that t1_elmer0_ext_intr() masked off: restore the mask bit, ack the
 * latched cause, and rewrite the enable register.  async_lock
 * serializes updates to slow_intr_mask and the enable register.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
 921
/*
 * Interrupt-context handler for elmer0 external interrupts.
 *
 * Masks the EXT interrupt source (handling requires process context)
 * and schedules ext_intr_task(), which will run the real handler and
 * re-enable the source when done.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
 937
/*
 * Handle a fatal hardware error: if full initialization completed,
 * stop the SGE and mask all interrupts, then log that operation on
 * this adapter is suspended.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
 947
/* net_device method table shared by all ports of an adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open               = cxgb_open,
	.ndo_stop               = cxgb_close,
	.ndo_start_xmit         = t1_start_xmit,
	.ndo_get_stats          = t1_get_stats,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = t1_set_rxmode,
	.ndo_do_ioctl           = t1_ioctl,
	.ndo_change_mtu         = t1_change_mtu,
	.ndo_set_mac_address    = t1_set_mac_addr,
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	.ndo_vlan_rx_register   = t1_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = t1_netpoll,
#endif
};
 965
/*
 * PCI probe: bring up one Chelsio T1 board.
 *
 * Enables the PCI device, configures DMA masks, maps BAR0, allocates
 * one net_device per port (the adapter state lives in the private area
 * of the first port's net_device), initializes the software modules
 * and registers the net devices.  Registration failures of individual
 * ports are tolerated as long as at least one port registers.
 * Returns 0 on success or a negative errno, unwinding everything
 * acquired so far on failure.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit mask. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first net_device carries the adapter struct
		 * in its private area; later ports allocate none. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			/* First port: set up the shared adapter state. */
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;    /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port net_device setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on tx. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warning("%s: cannot register net device %s, skipping\n",
				   pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1170
1171static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1172{
1173        int data;
1174        int i;
1175        u32 val;
1176
1177        enum {
1178                S_CLOCK = 1 << 3,
1179                S_DATA = 1 << 4
1180        };
1181
1182        for (i = (nbits - 1); i > -1; i--) {
1183
1184                udelay(50);
1185
1186                data = ((bitdata >> i) & 0x1);
1187                __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1188
1189                if (data)
1190                        val |= S_DATA;
1191                else
1192                        val &= ~S_DATA;
1193
1194                udelay(50);
1195
1196                /* Set SCLOCK low */
1197                val &= ~S_CLOCK;
1198                __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1199
1200                udelay(50);
1201
1202                /* Write SCLOCK high */
1203                val |= S_CLOCK;
1204                __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1205
1206        }
1207}
1208
/*
 * Re-program the T1B ASIC core and memory clock synthesizers.
 *
 * mode bit 0 selects full speed (HCLOCK) or power-save (LCLOCK)
 * M-divider values; mode bit 1 means "query only".  Returns -ENODEV
 * for non-T1B chips, -EALREADY if the requested mode is already
 * active, 0 otherwise.  Each synthesizer is programmed the same way:
 * pulse NP_LOAD to initialize, bit-bang the T/N/M values via the
 * ELMER0 GPO pins, then pulse the corresponding S_LOAD_* line to
 * latch them.  The whole sequence runs under tpi_lock so the serial
 * stream is not interrupted.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV; /* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1318
/*
 * Soft-reset the chip by cycling it through PCI power state D3hot
 * (value 3) and back to D0 via the PM control/status register.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1324
/*
 * PCI remove: tear down one board.  Unregisters every successfully
 * registered port, frees the software modules, unmaps the registers,
 * frees the per-port net_devices (the first of which also holds the
 * adapter state, so it is freed last), releases PCI resources and
 * finally soft-resets the chip.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* NOTE(review): relies on for_each_port() above leaving i equal
	 * to the port count -- confirm against its definition. */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}
1349
/* PCI driver glue: binds probe/remove to the supported device table. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1356
/* Module entry point: register the PCI driver. */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);
1369