linux/drivers/net/chelsio/cxgb2.c
   1/*****************************************************************************
   2 *                                                                           *
   3 * File: cxgb2.c                                                             *
   4 * $Revision: 1.25 $                                                         *
   5 * $Date: 2005/06/22 00:43:25 $                                              *
   6 * Description:                                                              *
   7 *  Chelsio 10Gb Ethernet Driver.                                            *
   8 *                                                                           *
   9 * This program is free software; you can redistribute it and/or modify      *
  10 * it under the terms of the GNU General Public License, version 2, as       *
  11 * published by the Free Software Foundation.                                *
  12 *                                                                           *
  13 * You should have received a copy of the GNU General Public License along   *
  14 * with this program; if not, write to the Free Software Foundation, Inc.,   *
  15 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
  16 *                                                                           *
  17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
  19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
  20 *                                                                           *
  21 * http://www.chelsio.com                                                    *
  22 *                                                                           *
  23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
  24 * All rights reserved.                                                      *
  25 *                                                                           *
  26 * Maintainers: maintainers@chelsio.com                                      *
  27 *                                                                           *
  28 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
  29 *          Tina Yang               <tainay@chelsio.com>                     *
  30 *          Felix Marti             <felix@chelsio.com>                      *
  31 *          Scott Bardone           <sbardone@chelsio.com>                   *
  32 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
  33 *          Frank DiMambro          <frank@chelsio.com>                      *
  34 *                                                                           *
  35 * History:                                                                  *
  36 *                                                                           *
  37 ****************************************************************************/
  38
  39#include "common.h"
  40#include <linux/module.h>
  41#include <linux/init.h>
  42#include <linux/pci.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/if_vlan.h>
  46#include <linux/mii.h>
  47#include <linux/sockios.h>
  48#include <linux/dma-mapping.h>
  49#include <asm/uaccess.h>
  50
  51#include "cpl5_cmd.h"
  52#include "regs.h"
  53#include "gmac.h"
  54#include "cphy.h"
  55#include "sge.h"
  56#include "tp.h"
  57#include "espi.h"
  58#include "elmer0.h"
  59
  60#include <linux/workqueue.h>
  61
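/*
 * Helpers for the periodic MAC statistics work.  stats_update_task is the
 * delayed work item set up in init_one() to run mac_stats_task(); secs * HZ
 * converts the requested interval from seconds to jiffies.
 */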
  62static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
  63{
  64        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
  65}
  66
  67static inline void cancel_mac_stats_update(struct adapter *ap)
  68{
  69        cancel_delayed_work(&ap->stats_update_task);
  70}
  71
  72#define MAX_CMDQ_ENTRIES        16384
  73#define MAX_CMDQ1_ENTRIES       1024
  74#define MAX_RX_BUFFERS          16384
  75#define MAX_RX_JUMBO_BUFFERS    16384
  76#define MAX_TX_BUFFERS_HIGH     16384U
  77#define MAX_TX_BUFFERS_LOW      1536U
  78#define MAX_TX_BUFFERS          1460U
  79#define MIN_FL_ENTRIES          32
  80
  81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  82                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
  83                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
  84
  85/*
  86 * The EEPROM is actually bigger, but only the first few bytes are used, so we
  87 * report only those.
  88 */
  89#define EEPROM_SIZE 32
  90
  91MODULE_DESCRIPTION(DRV_DESCRIPTION);
  92MODULE_AUTHOR("Chelsio Communications");
  93MODULE_LICENSE("GPL");
  94
  95static int dflt_msg_enable = DFLT_MSG_ENABLE;
  96
  97module_param(dflt_msg_enable, int, 0);
  98MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
  99
 100#define HCLOCK 0x0
 101#define LCLOCK 0x1
 102
 103/* Powersave mode for T1 cards */
 104static int t1_clock(struct adapter *adapter, int mode);
 105static int t1powersave = 1;     /* HW default is powersave mode. */
 106
 107module_param(t1powersave, int, 0);
 108MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
 109
 110static int disable_msi = 0;
 111module_param(disable_msi, int, 0);
 112MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
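
/*
 * Example module load (illustrative values only): disable MSI and leave a
 * T1B running at full clock instead of the power-save default:
 *
 *   modprobe cxgb2 disable_msi=1 t1powersave=0
 */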
 113
 114static const char pci_speed[][4] = {
 115        "33", "66", "100", "133"
 116};
 117
 118/*
 119 * Set up the MAC to receive the types of packets we want.
 120 */
 121static void t1_set_rxmode(struct net_device *dev)
 122{
 123        struct adapter *adapter = dev->priv;
 124        struct cmac *mac = adapter->port[dev->if_port].mac;
 125        struct t1_rx_mode rm;
 126
 127        rm.dev = dev;
 128        rm.idx = 0;
 129        rm.list = dev->mc_list;
 130        mac->ops->set_rx_mode(mac, &rm);
 131}
 132
 133static void link_report(struct port_info *p)
 134{
 135        if (!netif_carrier_ok(p->dev))
 136                printk(KERN_INFO "%s: link down\n", p->dev->name);
 137        else {
 138                const char *s = "10Mbps";
 139
 140                switch (p->link_config.speed) {
 141                        case SPEED_10000: s = "10Gbps"; break;
 142                        case SPEED_1000:  s = "1000Mbps"; break;
 143                        case SPEED_100:   s = "100Mbps"; break;
 144                }
 145
 146                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 147                       p->dev->name, s,
 148                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 149        }
 150}
 151
 152void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
 153                        int speed, int duplex, int pause)
 154{
 155        struct port_info *p = &adapter->port[port_id];
 156
 157        if (link_stat != netif_carrier_ok(p->dev)) {
 158                if (link_stat)
 159                        netif_carrier_on(p->dev);
 160                else
 161                        netif_carrier_off(p->dev);
 162                link_report(p);
 163
 164                /* multi-port: update the per-port Tx scheduler with the new speed */
 165                if ((speed > 0) && (adapter->params.nports > 1)) {
 166                        unsigned int sched_speed = 10;
 167                        switch (speed) {
 168                        case SPEED_1000:
 169                                sched_speed = 1000;
 170                                break;
 171                        case SPEED_100:
 172                                sched_speed = 100;
 173                                break;
 174                        case SPEED_10:
 175                                sched_speed = 10;
 176                                break;
 177                        }
 178                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
 179                }
 180        }
 181}
 182
 183static void link_start(struct port_info *p)
 184{
 185        struct cmac *mac = p->mac;
 186
 187        mac->ops->reset(mac);
 188        if (mac->ops->macaddress_set)
 189                mac->ops->macaddress_set(mac, p->dev->dev_addr);
 190        t1_set_rxmode(p->dev);
 191        t1_link_start(p->phy, mac, &p->link_config);
 192        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 193}
 194
 195static void enable_hw_csum(struct adapter *adapter)
 196{
 197        if (adapter->flags & TSO_CAPABLE)
 198                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
 199        if (adapter->flags & UDP_CSUM_CAPABLE)
 200                t1_tp_set_udp_checksum_offload(adapter->tp, 1);
 201        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
 202}
 203
 204/*
 205 * Things to do upon first use of a card.
 206 * This must run with the rtnl lock held.
 207 */
 208static int cxgb_up(struct adapter *adapter)
 209{
 210        int err = 0;
 211
 212        if (!(adapter->flags & FULL_INIT_DONE)) {
 213                err = t1_init_hw_modules(adapter);
 214                if (err)
 215                        goto out_err;
 216
 217                enable_hw_csum(adapter);
 218                adapter->flags |= FULL_INIT_DONE;
 219        }
 220
 221        t1_interrupts_clear(adapter);
 222
 223        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
 224        err = request_irq(adapter->pdev->irq, t1_interrupt,
 225                          adapter->params.has_msi ? 0 : IRQF_SHARED,
 226                          adapter->name, adapter);
 227        if (err) {
 228                if (adapter->params.has_msi)
 229                        pci_disable_msi(adapter->pdev);
 230
 231                goto out_err;
 232        }
 233
 234        t1_sge_start(adapter->sge);
 235        t1_interrupts_enable(adapter);
 236out_err:
 237        return err;
 238}
 239
 240/*
 241 * Release resources when all the ports have been stopped.
 242 */
 243static void cxgb_down(struct adapter *adapter)
 244{
 245        t1_sge_stop(adapter->sge);
 246        t1_interrupts_disable(adapter);
 247        free_irq(adapter->pdev->irq, adapter);
 248        if (adapter->params.has_msi)
 249                pci_disable_msi(adapter->pdev);
 250}
 251
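/*
 * net_device open handler.  The first port opened on an adapter brings up
 * the shared state via cxgb_up(); later opens only start their own port.
 * open_device_map keeps a bit per active port.
 */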
 252static int cxgb_open(struct net_device *dev)
 253{
 254        int err;
 255        struct adapter *adapter = dev->priv;
 256        int other_ports = adapter->open_device_map & PORT_MASK;
 257
 258        napi_enable(&adapter->napi);
 259        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
 260                napi_disable(&adapter->napi);
 261                return err;
 262        }
 263
 264        __set_bit(dev->if_port, &adapter->open_device_map);
 265        link_start(&adapter->port[dev->if_port]);
 266        netif_start_queue(dev);
 267        if (!other_ports && adapter->params.stats_update_period)
 268                schedule_mac_stats_update(adapter,
 269                                          adapter->params.stats_update_period);
 270        return 0;
 271}
 272
 273static int cxgb_close(struct net_device *dev)
 274{
 275        struct adapter *adapter = dev->priv;
 276        struct port_info *p = &adapter->port[dev->if_port];
 277        struct cmac *mac = p->mac;
 278
 279        netif_stop_queue(dev);
 280        napi_disable(&adapter->napi);
 281        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 282        netif_carrier_off(dev);
 283
 284        clear_bit(dev->if_port, &adapter->open_device_map);
 285        if (adapter->params.stats_update_period &&
 286            !(adapter->open_device_map & PORT_MASK)) {
 287                /* Stop statistics accumulation. */
 288                smp_mb__after_clear_bit();
 289                spin_lock(&adapter->work_lock);   /* sync with update task */
 290                spin_unlock(&adapter->work_lock);
 291                cancel_mac_stats_update(adapter);
 292        }
 293
 294        if (!adapter->open_device_map)
 295                cxgb_down(adapter);
 296        return 0;
 297}
 298
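/*
 * Fill a net_device_stats structure from the hardware MAC counters.  A
 * MAC_STATS_UPDATE_FULL refresh is requested here, as opposed to the
 * MAC_STATS_UPDATE_FAST refresh done by the periodic mac_stats_task().
 */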
 299static struct net_device_stats *t1_get_stats(struct net_device *dev)
 300{
 301        struct adapter *adapter = dev->priv;
 302        struct port_info *p = &adapter->port[dev->if_port];
 303        struct net_device_stats *ns = &p->netstats;
 304        const struct cmac_statistics *pstats;
 305
 306        /* Do a full update of the MAC stats */
 307        pstats = p->mac->ops->statistics_update(p->mac,
 308                                                MAC_STATS_UPDATE_FULL);
 309
 310        ns->tx_packets = pstats->TxUnicastFramesOK +
 311                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
 312
 313        ns->rx_packets = pstats->RxUnicastFramesOK +
 314                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
 315
 316        ns->tx_bytes = pstats->TxOctetsOK;
 317        ns->rx_bytes = pstats->RxOctetsOK;
 318
 319        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
 320                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
 321        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
 322                pstats->RxFCSErrors + pstats->RxAlignErrors +
 323                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
 324                pstats->RxSymbolErrors + pstats->RxRuntErrors;
 325
 326        ns->multicast  = pstats->RxMulticastFramesOK;
 327        ns->collisions = pstats->TxTotalCollisions;
 328
 329        /* detailed rx_errors */
 330        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
 331                pstats->RxJabberErrors;
 332        ns->rx_over_errors   = 0;
 333        ns->rx_crc_errors    = pstats->RxFCSErrors;
 334        ns->rx_frame_errors  = pstats->RxAlignErrors;
 335        ns->rx_fifo_errors   = 0;
 336        ns->rx_missed_errors = 0;
 337
 338        /* detailed tx_errors */
 339        ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
 340        ns->tx_carrier_errors   = 0;
 341        ns->tx_fifo_errors      = pstats->TxUnderrun;
 342        ns->tx_heartbeat_errors = 0;
 343        ns->tx_window_errors    = pstats->TxLateCollisions;
 344        return ns;
 345}
 346
 347static u32 get_msglevel(struct net_device *dev)
 348{
 349        struct adapter *adapter = dev->priv;
 350
 351        return adapter->msg_enable;
 352}
 353
 354static void set_msglevel(struct net_device *dev, u32 val)
 355{
 356        struct adapter *adapter = dev->priv;
 357
 358        adapter->msg_enable = val;
 359}
 360
 361static char stats_strings[][ETH_GSTRING_LEN] = {
 362        "TxOctetsOK",
 363        "TxOctetsBad",
 364        "TxUnicastFramesOK",
 365        "TxMulticastFramesOK",
 366        "TxBroadcastFramesOK",
 367        "TxPauseFrames",
 368        "TxFramesWithDeferredXmissions",
 369        "TxLateCollisions",
 370        "TxTotalCollisions",
 371        "TxFramesAbortedDueToXSCollisions",
 372        "TxUnderrun",
 373        "TxLengthErrors",
 374        "TxInternalMACXmitError",
 375        "TxFramesWithExcessiveDeferral",
 376        "TxFCSErrors",
 377        "TxJumboFramesOk",
 378        "TxJumboOctetsOk",
 379        
 380        "RxOctetsOK",
 381        "RxOctetsBad",
 382        "RxUnicastFramesOK",
 383        "RxMulticastFramesOK",
 384        "RxBroadcastFramesOK",
 385        "RxPauseFrames",
 386        "RxFCSErrors",
 387        "RxAlignErrors",
 388        "RxSymbolErrors",
 389        "RxDataErrors",
 390        "RxSequenceErrors",
 391        "RxRuntErrors",
 392        "RxJabberErrors",
 393        "RxInternalMACRcvError",
 394        "RxInRangeLengthErrors",
 395        "RxOutOfRangeLengthField",
 396        "RxFrameTooLongErrors",
 397        "RxJumboFramesOk",
 398        "RxJumboOctetsOk",
 399
 400        /* Port stats */
 401        "RxCsumGood",
 402        "TxCsumOffload",
 403        "TxTso",
 404        "RxVlan",
 405        "TxVlan",
 406        "TxNeedHeadroom", 
 407        
 408        /* Interrupt stats */
 409        "rx drops",
 410        "pure_rsps",
 411        "unhandled irqs",
 412        "respQ_empty",
 413        "respQ_overflow",
 414        "freelistQ_empty",
 415        "pkt_too_big",
 416        "pkt_mismatch",
 417        "cmdQ_full0",
 418        "cmdQ_full1",
 419
 420        "espi_DIP2ParityErr",
 421        "espi_DIP4Err",
 422        "espi_RxDrops",
 423        "espi_TxDrops",
 424        "espi_RxOvfl",
 425        "espi_ParityErr"
 426};
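
/*
 * Note: the order of stats_strings must match the order in which get_stats()
 * below writes the counters; "ethtool -S <iface>" pairs names and values by
 * position.
 */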
 427
 428#define T2_REGMAP_SIZE (3 * 1024)
 429
 430static int get_regs_len(struct net_device *dev)
 431{
 432        return T2_REGMAP_SIZE;
 433}
 434
 435static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 436{
 437        struct adapter *adapter = dev->priv;
 438
 439        strcpy(info->driver, DRV_NAME);
 440        strcpy(info->version, DRV_VERSION);
 441        strcpy(info->fw_version, "N/A");
 442        strcpy(info->bus_info, pci_name(adapter->pdev));
 443}
 444
 445static int get_sset_count(struct net_device *dev, int sset)
 446{
 447        switch (sset) {
 448        case ETH_SS_STATS:
 449                return ARRAY_SIZE(stats_strings);
 450        default:
 451                return -EOPNOTSUPP;
 452        }
 453}
 454
 455static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 456{
 457        if (stringset == ETH_SS_STATS)
 458                memcpy(data, stats_strings, sizeof(stats_strings));
 459}
 460
 461static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 462                      u64 *data)
 463{
 464        struct adapter *adapter = dev->priv;
 465        struct cmac *mac = adapter->port[dev->if_port].mac;
 466        const struct cmac_statistics *s;
 467        const struct sge_intr_counts *t;
 468        struct sge_port_stats ss;
 469
 470        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
 471        t = t1_sge_get_intr_counts(adapter->sge);
 472        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
 473
 474        *data++ = s->TxOctetsOK;
 475        *data++ = s->TxOctetsBad;
 476        *data++ = s->TxUnicastFramesOK;
 477        *data++ = s->TxMulticastFramesOK;
 478        *data++ = s->TxBroadcastFramesOK;
 479        *data++ = s->TxPauseFrames;
 480        *data++ = s->TxFramesWithDeferredXmissions;
 481        *data++ = s->TxLateCollisions;
 482        *data++ = s->TxTotalCollisions;
 483        *data++ = s->TxFramesAbortedDueToXSCollisions;
 484        *data++ = s->TxUnderrun;
 485        *data++ = s->TxLengthErrors;
 486        *data++ = s->TxInternalMACXmitError;
 487        *data++ = s->TxFramesWithExcessiveDeferral;
 488        *data++ = s->TxFCSErrors;
 489        *data++ = s->TxJumboFramesOK;
 490        *data++ = s->TxJumboOctetsOK;
 491
 492        *data++ = s->RxOctetsOK;
 493        *data++ = s->RxOctetsBad;
 494        *data++ = s->RxUnicastFramesOK;
 495        *data++ = s->RxMulticastFramesOK;
 496        *data++ = s->RxBroadcastFramesOK;
 497        *data++ = s->RxPauseFrames;
 498        *data++ = s->RxFCSErrors;
 499        *data++ = s->RxAlignErrors;
 500        *data++ = s->RxSymbolErrors;
 501        *data++ = s->RxDataErrors;
 502        *data++ = s->RxSequenceErrors;
 503        *data++ = s->RxRuntErrors;
 504        *data++ = s->RxJabberErrors;
 505        *data++ = s->RxInternalMACRcvError;
 506        *data++ = s->RxInRangeLengthErrors;
 507        *data++ = s->RxOutOfRangeLengthField;
 508        *data++ = s->RxFrameTooLongErrors;
 509        *data++ = s->RxJumboFramesOK;
 510        *data++ = s->RxJumboOctetsOK;
 511
 512        *data++ = ss.rx_cso_good;
 513        *data++ = ss.tx_cso;
 514        *data++ = ss.tx_tso;
 515        *data++ = ss.vlan_xtract;
 516        *data++ = ss.vlan_insert;
 517        *data++ = ss.tx_need_hdrroom;
 518        
 519        *data++ = t->rx_drops;
 520        *data++ = t->pure_rsps;
 521        *data++ = t->unhandled_irqs;
 522        *data++ = t->respQ_empty;
 523        *data++ = t->respQ_overflow;
 524        *data++ = t->freelistQ_empty;
 525        *data++ = t->pkt_too_big;
 526        *data++ = t->pkt_mismatch;
 527        *data++ = t->cmdQ_full[0];
 528        *data++ = t->cmdQ_full[1];
 529
 530        if (adapter->espi) {
 531                const struct espi_intr_counts *e;
 532
 533                e = t1_espi_get_intr_counts(adapter->espi);
 534                *data++ = e->DIP2_parity_err;
 535                *data++ = e->DIP4_err;
 536                *data++ = e->rx_drops;
 537                *data++ = e->tx_drops;
 538                *data++ = e->rx_ovflw;
 539                *data++ = e->parity_err;
 540        }
 541}
 542
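/*
 * Copy the registers in [start, end] into the dump buffer at the offsets
 * they occupy in the register map (buf + start uses GCC's void-pointer
 * arithmetic, as is common in kernel code).
 */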
 543static inline void reg_block_dump(struct adapter *ap, void *buf,
 544                                  unsigned int start, unsigned int end)
 545{
 546        u32 *p = buf + start;
 547
 548        for ( ; start <= end; start += sizeof(u32))
 549                *p++ = readl(ap->regs + start);
 550}
 551
 552static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
 553                     void *buf)
 554{
 555        struct adapter *ap = dev->priv;
 556
 557        /*
 558         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
 559         */
 560        regs->version = 2;
 561
 562        memset(buf, 0, T2_REGMAP_SIZE);
 563        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
 564        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
 565        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
 566        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
 567        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
 568        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
 569        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
 570        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
 571        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
 572        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
 573}
 574
 575static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 576{
 577        struct adapter *adapter = dev->priv;
 578        struct port_info *p = &adapter->port[dev->if_port];
 579
 580        cmd->supported = p->link_config.supported;
 581        cmd->advertising = p->link_config.advertising;
 582
 583        if (netif_carrier_ok(dev)) {
 584                cmd->speed = p->link_config.speed;
 585                cmd->duplex = p->link_config.duplex;
 586        } else {
 587                cmd->speed = -1;
 588                cmd->duplex = -1;
 589        }
 590
 591        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
 592        cmd->phy_address = p->phy->addr;
 593        cmd->transceiver = XCVR_EXTERNAL;
 594        cmd->autoneg = p->link_config.autoneg;
 595        cmd->maxtxpkt = 0;
 596        cmd->maxrxpkt = 0;
 597        return 0;
 598}
 599
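/*
 * Map a (speed, duplex) pair to the matching ethtool SUPPORTED_* bit, e.g.
 * (SPEED_100, DUPLEX_FULL) -> SUPPORTED_100baseT_Full.  10G is full-duplex
 * only, so there is no half-duplex capability bit for SPEED_10000.
 */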
 600static int speed_duplex_to_caps(int speed, int duplex)
 601{
 602        int cap = 0;
 603
 604        switch (speed) {
 605        case SPEED_10:
 606                if (duplex == DUPLEX_FULL)
 607                        cap = SUPPORTED_10baseT_Full;
 608                else
 609                        cap = SUPPORTED_10baseT_Half;
 610                break;
 611        case SPEED_100:
 612                if (duplex == DUPLEX_FULL)
 613                        cap = SUPPORTED_100baseT_Full;
 614                else
 615                        cap = SUPPORTED_100baseT_Half;
 616                break;
 617        case SPEED_1000:
 618                if (duplex == DUPLEX_FULL)
 619                        cap = SUPPORTED_1000baseT_Full;
 620                else
 621                        cap = SUPPORTED_1000baseT_Half;
 622                break;
 623        case SPEED_10000:
 624                if (duplex == DUPLEX_FULL)
 625                        cap = SUPPORTED_10000baseT_Full;
 626        }
 627        return cap;
 628}
 629
 630#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
 631                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
 632                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
 633                      ADVERTISED_10000baseT_Full)
 634
 635static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 636{
 637        struct adapter *adapter = dev->priv;
 638        struct port_info *p = &adapter->port[dev->if_port];
 639        struct link_config *lc = &p->link_config;
 640
 641        if (!(lc->supported & SUPPORTED_Autoneg))
 642                return -EOPNOTSUPP;             /* can't change speed/duplex */
 643
 644        if (cmd->autoneg == AUTONEG_DISABLE) {
 645                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
 646
 647                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
 648                        return -EINVAL;
 649                lc->requested_speed = cmd->speed;
 650                lc->requested_duplex = cmd->duplex;
 651                lc->advertising = 0;
 652        } else {
 653                cmd->advertising &= ADVERTISED_MASK;
 654                if (cmd->advertising & (cmd->advertising - 1))
 655                        cmd->advertising = lc->supported;
 656                cmd->advertising &= lc->supported;
 657                if (!cmd->advertising)
 658                        return -EINVAL;
 659                lc->requested_speed = SPEED_INVALID;
 660                lc->requested_duplex = DUPLEX_INVALID;
 661                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
 662        }
 663        lc->autoneg = cmd->autoneg;
 664        if (netif_running(dev))
 665                t1_link_start(p->phy, p->mac, lc);
 666        return 0;
 667}
 668
 669static void get_pauseparam(struct net_device *dev,
 670                           struct ethtool_pauseparam *epause)
 671{
 672        struct adapter *adapter = dev->priv;
 673        struct port_info *p = &adapter->port[dev->if_port];
 674
 675        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
 676        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
 677        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
 678}
 679
 680static int set_pauseparam(struct net_device *dev,
 681                          struct ethtool_pauseparam *epause)
 682{
 683        struct adapter *adapter = dev->priv;
 684        struct port_info *p = &adapter->port[dev->if_port];
 685        struct link_config *lc = &p->link_config;
 686
 687        if (epause->autoneg == AUTONEG_DISABLE)
 688                lc->requested_fc = 0;
 689        else if (lc->supported & SUPPORTED_Autoneg)
 690                lc->requested_fc = PAUSE_AUTONEG;
 691        else
 692                return -EINVAL;
 693
 694        if (epause->rx_pause)
 695                lc->requested_fc |= PAUSE_RX;
 696        if (epause->tx_pause)
 697                lc->requested_fc |= PAUSE_TX;
 698        if (lc->autoneg == AUTONEG_ENABLE) {
 699                if (netif_running(dev))
 700                        t1_link_start(p->phy, p->mac, lc);
 701        } else {
 702                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 703                if (netif_running(dev))
 704                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
 705                                                         lc->fc);
 706        }
 707        return 0;
 708}
 709
 710static u32 get_rx_csum(struct net_device *dev)
 711{
 712        struct adapter *adapter = dev->priv;
 713
 714        return (adapter->flags & RX_CSUM_ENABLED) != 0;
 715}
 716
 717static int set_rx_csum(struct net_device *dev, u32 data)
 718{
 719        struct adapter *adapter = dev->priv;
 720
 721        if (data)
 722                adapter->flags |= RX_CSUM_ENABLED;
 723        else
 724                adapter->flags &= ~RX_CSUM_ENABLED;
 725        return 0;
 726}
 727
 728static int set_tso(struct net_device *dev, u32 value)
 729{
 730        struct adapter *adapter = dev->priv;
 731
 732        if (!(adapter->flags & TSO_CAPABLE))
 733                return value ? -EOPNOTSUPP : 0;
 734        return ethtool_op_set_tso(dev, value);
 735}
 736
 737static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 738{
 739        struct adapter *adapter = dev->priv;
 740        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 741
 742        e->rx_max_pending = MAX_RX_BUFFERS;
 743        e->rx_mini_max_pending = 0;
 744        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
 745        e->tx_max_pending = MAX_CMDQ_ENTRIES;
 746
 747        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
 748        e->rx_mini_pending = 0;
 749        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
 750        e->tx_pending = adapter->params.sge.cmdQ_size[0];
 751}
 752
 753static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 754{
 755        struct adapter *adapter = dev->priv;
 756        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 757
 758        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
 759            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
 760            e->tx_pending > MAX_CMDQ_ENTRIES ||
 761            e->rx_pending < MIN_FL_ENTRIES ||
 762            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
 763            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
 764                return -EINVAL;
 765
 766        if (adapter->flags & FULL_INIT_DONE)
 767                return -EBUSY;
 768
 769        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 770        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
 771        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
 772        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
 773                MAX_CMDQ1_ENTRIES : e->tx_pending;
 774        return 0;
 775}
 776
 777static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 778{
 779        struct adapter *adapter = dev->priv;
 780
 781        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 782        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 783        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 784        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 785        return 0;
 786}
 787
 788static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 789{
 790        struct adapter *adapter = dev->priv;
 791
 792        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
 793        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
 794        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
 795        return 0;
 796}
 797
 798static int get_eeprom_len(struct net_device *dev)
 799{
 800        struct adapter *adapter = dev->priv;
 801
 802        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 803}
 804
 805#define EEPROM_MAGIC(ap) \
 806        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
 807
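/*
 * Read the serial EEPROM for "ethtool -e".  The magic value encodes the PCI
 * vendor ID in the low 16 bits and the chip version above it; reads are done
 * as aligned 32-bit words, so the requested window is rounded down to a word
 * boundary internally.
 */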
 808static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
 809                      u8 *data)
 810{
 811        int i;
 812        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
 813        struct adapter *adapter = dev->priv;
 814
 815        e->magic = EEPROM_MAGIC(adapter);
 816        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
 817                t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
 818        memcpy(data, buf + e->offset, e->len);
 819        return 0;
 820}
 821
 822static const struct ethtool_ops t1_ethtool_ops = {
 823        .get_settings      = get_settings,
 824        .set_settings      = set_settings,
 825        .get_drvinfo       = get_drvinfo,
 826        .get_msglevel      = get_msglevel,
 827        .set_msglevel      = set_msglevel,
 828        .get_ringparam     = get_sge_param,
 829        .set_ringparam     = set_sge_param,
 830        .get_coalesce      = get_coalesce,
 831        .set_coalesce      = set_coalesce,
 832        .get_eeprom_len    = get_eeprom_len,
 833        .get_eeprom        = get_eeprom,
 834        .get_pauseparam    = get_pauseparam,
 835        .set_pauseparam    = set_pauseparam,
 836        .get_rx_csum       = get_rx_csum,
 837        .set_rx_csum       = set_rx_csum,
 838        .set_tx_csum       = ethtool_op_set_tx_csum,
 839        .set_sg            = ethtool_op_set_sg,
 840        .get_link          = ethtool_op_get_link,
 841        .get_strings       = get_strings,
 842        .get_sset_count    = get_sset_count,
 843        .get_ethtool_stats = get_stats,
 844        .get_regs_len      = get_regs_len,
 845        .get_regs          = get_regs,
 846        .set_tso           = set_tso,
 847};
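
/*
 * Typical ethtool invocations served by the table above (the interface name
 * is only an example):
 *
 *   ethtool -S eth0               # MAC/SGE/ESPI counters (get_stats)
 *   ethtool -d eth0               # raw register dump (get_regs)
 *   ethtool -g eth0               # SGE ring sizes (get_sge_param)
 *   ethtool -C eth0 rx-usecs 50   # interrupt coalescing (set_coalesce)
 */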
 848
 849static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 850{
 851        struct adapter *adapter = dev->priv;
 852        struct mii_ioctl_data *data = if_mii(req);
 853
 854        switch (cmd) {
 855        case SIOCGMIIPHY:
 856                data->phy_id = adapter->port[dev->if_port].phy->addr;
 857                /* FALLTHRU */
 858        case SIOCGMIIREG: {
 859                struct cphy *phy = adapter->port[dev->if_port].phy;
 860                u32 val;
 861
 862                if (!phy->mdio_read)
 863                        return -EOPNOTSUPP;
 864                phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 865                               &val);
 866                data->val_out = val;
 867                break;
 868        }
 869        case SIOCSMIIREG: {
 870                struct cphy *phy = adapter->port[dev->if_port].phy;
 871
 872                if (!capable(CAP_NET_ADMIN))
 873                        return -EPERM;
 874                if (!phy->mdio_write)
 875                        return -EOPNOTSUPP;
 876                phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 877                                data->val_in);
 878                break;
 879        }
 880
 881        default:
 882                return -EOPNOTSUPP;
 883        }
 884        return 0;
 885}
 886
 887static int t1_change_mtu(struct net_device *dev, int new_mtu)
 888{
 889        int ret;
 890        struct adapter *adapter = dev->priv;
 891        struct cmac *mac = adapter->port[dev->if_port].mac;
 892
 893        if (!mac->ops->set_mtu)
 894                return -EOPNOTSUPP;
 895        if (new_mtu < 68)
 896                return -EINVAL;
 897        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 898                return ret;
 899        dev->mtu = new_mtu;
 900        return 0;
 901}
 902
 903static int t1_set_mac_addr(struct net_device *dev, void *p)
 904{
 905        struct adapter *adapter = dev->priv;
 906        struct cmac *mac = adapter->port[dev->if_port].mac;
 907        struct sockaddr *addr = p;
 908
 909        if (!mac->ops->macaddress_set)
 910                return -EOPNOTSUPP;
 911
 912        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 913        mac->ops->macaddress_set(mac, dev->dev_addr);
 914        return 0;
 915}
 916
 917#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 918static void vlan_rx_register(struct net_device *dev,
 919                                   struct vlan_group *grp)
 920{
 921        struct adapter *adapter = dev->priv;
 922
 923        spin_lock_irq(&adapter->async_lock);
 924        adapter->vlan_grp = grp;
 925        t1_set_vlan_accel(adapter, grp != NULL);
 926        spin_unlock_irq(&adapter->async_lock);
 927}
 928#endif
 929
 930#ifdef CONFIG_NET_POLL_CONTROLLER
 931static void t1_netpoll(struct net_device *dev)
 932{
 933        unsigned long flags;
 934        struct adapter *adapter = dev->priv;
 935
 936        local_irq_save(flags);
 937        t1_interrupt(adapter->pdev->irq, adapter);
 938        local_irq_restore(flags);
 939}
 940#endif
 941
 942/*
 943 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 944 * does not have any other way to prevent stats counter overflow.
 945 */
 946static void mac_stats_task(struct work_struct *work)
 947{
 948        int i;
 949        struct adapter *adapter =
 950                container_of(work, struct adapter, stats_update_task.work);
 951
 952        for_each_port(adapter, i) {
 953                struct port_info *p = &adapter->port[i];
 954
 955                if (netif_running(p->dev))
 956                        p->mac->ops->statistics_update(p->mac,
 957                                                       MAC_STATS_UPDATE_FAST);
 958        }
 959
 960        /* Schedule the next statistics update if any port is active. */
 961        spin_lock(&adapter->work_lock);
 962        if (adapter->open_device_map & PORT_MASK)
 963                schedule_mac_stats_update(adapter,
 964                                          adapter->params.stats_update_period);
 965        spin_unlock(&adapter->work_lock);
 966}
 967
 968/*
 969 * Processes elmer0 external interrupts in process context.
 970 */
 971static void ext_intr_task(struct work_struct *work)
 972{
 973        struct adapter *adapter =
 974                container_of(work, struct adapter, ext_intr_handler_task);
 975
 976        t1_elmer0_ext_intr_handler(adapter);
 977
 978        /* Now reenable external interrupts */
 979        spin_lock_irq(&adapter->async_lock);
 980        adapter->slow_intr_mask |= F_PL_INTR_EXT;
 981        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
 982        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
 983                   adapter->regs + A_PL_ENABLE);
 984        spin_unlock_irq(&adapter->async_lock);
 985}
 986
 987/*
 988 * Interrupt-context handler for elmer0 external interrupts.
 989 */
 990void t1_elmer0_ext_intr(struct adapter *adapter)
 991{
 992        /*
 993         * Schedule a task to handle external interrupts as we require
 994         * a process context.  We disable EXT interrupts in the interim
 995         * and let the task reenable them when it's done.
 996         */
 997        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
 998        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
 999                   adapter->regs + A_PL_ENABLE);
1000        schedule_work(&adapter->ext_intr_handler_task);
1001}
1002
1003void t1_fatal_err(struct adapter *adapter)
1004{
1005        if (adapter->flags & FULL_INIT_DONE) {
1006                t1_sge_stop(adapter->sge);
1007                t1_interrupts_disable(adapter);
1008        }
1009        CH_ALERT("%s: encountered fatal error, operation suspended\n",
1010                 adapter->name);
1011}
1012
1013static int __devinit init_one(struct pci_dev *pdev,
1014                              const struct pci_device_id *ent)
1015{
1016        static int version_printed;
1017
1018        int i, err, pci_using_dac = 0;
1019        unsigned long mmio_start, mmio_len;
1020        const struct board_info *bi;
1021        struct adapter *adapter = NULL;
1022        struct port_info *pi;
1023
1024        if (!version_printed) {
1025                printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1026                       DRV_VERSION);
1027                ++version_printed;
1028        }
1029
1030        err = pci_enable_device(pdev);
1031        if (err)
1032                return err;
1033
1034        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1035                CH_ERR("%s: cannot find PCI device memory base address\n",
1036                       pci_name(pdev));
1037                err = -ENODEV;
1038                goto out_disable_pdev;
1039        }
1040
1041        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1042                pci_using_dac = 1;
1043
1044                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1045                        CH_ERR("%s: unable to obtain 64-bit DMA for "
1046                               "consistent allocations\n", pci_name(pdev));
1047                        err = -ENODEV;
1048                        goto out_disable_pdev;
1049                }
1050
1051        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1052                CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1053                goto out_disable_pdev;
1054        }
1055
1056        err = pci_request_regions(pdev, DRV_NAME);
1057        if (err) {
1058                CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1059                goto out_disable_pdev;
1060        }
1061
1062        pci_set_master(pdev);
1063
1064        mmio_start = pci_resource_start(pdev, 0);
1065        mmio_len = pci_resource_len(pdev, 0);
1066        bi = t1_get_board_info(ent->driver_data);
1067
1068        for (i = 0; i < bi->port_number; ++i) {
1069                struct net_device *netdev;
1070
1071                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1072                if (!netdev) {
1073                        err = -ENOMEM;
1074                        goto out_free_dev;
1075                }
1076
1077                SET_NETDEV_DEV(netdev, &pdev->dev);
1078
1079                if (!adapter) {
1080                        adapter = netdev->priv;
1081                        adapter->pdev = pdev;
1082                        adapter->port[0].dev = netdev;  /* so we don't leak it */
1083
1084                        adapter->regs = ioremap(mmio_start, mmio_len);
1085                        if (!adapter->regs) {
1086                                CH_ERR("%s: cannot map device registers\n",
1087                                       pci_name(pdev));
1088                                err = -ENOMEM;
1089                                goto out_free_dev;
1090                        }
1091
1092                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1093                                err = -ENODEV;    /* Can't handle this chip rev */
1094                                goto out_free_dev;
1095                        }
1096
1097                        adapter->name = pci_name(pdev);
1098                        adapter->msg_enable = dflt_msg_enable;
1099                        adapter->mmio_len = mmio_len;
1100
1101                        spin_lock_init(&adapter->tpi_lock);
1102                        spin_lock_init(&adapter->work_lock);
1103                        spin_lock_init(&adapter->async_lock);
1104                        spin_lock_init(&adapter->mac_lock);
1105
1106                        INIT_WORK(&adapter->ext_intr_handler_task,
1107                                  ext_intr_task);
1108                        INIT_DELAYED_WORK(&adapter->stats_update_task,
1109                                          mac_stats_task);
1110
1111                        pci_set_drvdata(pdev, netdev);
1112                }
1113
1114                pi = &adapter->port[i];
1115                pi->dev = netdev;
1116                netif_carrier_off(netdev);
1117                netdev->irq = pdev->irq;
1118                netdev->if_port = i;
1119                netdev->mem_start = mmio_start;
1120                netdev->mem_end = mmio_start + mmio_len - 1;
1121                netdev->priv = adapter;
1122                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1123                netdev->features |= NETIF_F_LLTX;
1124
1125                adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1126                if (pci_using_dac)
1127                        netdev->features |= NETIF_F_HIGHDMA;
1128                if (vlan_tso_capable(adapter)) {
1129#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1130                        adapter->flags |= VLAN_ACCEL_CAPABLE;
1131                        netdev->features |=
1132                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1133                        netdev->vlan_rx_register = vlan_rx_register;
1134#endif
1135
1136                        /* T204: disable TSO */
1137                        if (!(is_T2(adapter)) || bi->port_number != 4) {
1138                                adapter->flags |= TSO_CAPABLE;
1139                                netdev->features |= NETIF_F_TSO;
1140                        }
1141                }
1142
1143                netdev->open = cxgb_open;
1144                netdev->stop = cxgb_close;
1145                netdev->hard_start_xmit = t1_start_xmit;
1146                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1147                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1148                netdev->get_stats = t1_get_stats;
1149                netdev->set_multicast_list = t1_set_rxmode;
1150                netdev->do_ioctl = t1_ioctl;
1151                netdev->change_mtu = t1_change_mtu;
1152                netdev->set_mac_address = t1_set_mac_addr;
1153#ifdef CONFIG_NET_POLL_CONTROLLER
1154                netdev->poll_controller = t1_netpoll;
1155#endif
1156#ifdef CONFIG_CHELSIO_T1_NAPI
1157                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1158#endif
1159
1160                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1161        }
1162
1163        if (t1_init_sw_modules(adapter, bi) < 0) {
1164                err = -ENODEV;
1165                goto out_free_dev;
1166        }
1167
1168        /*
1169         * The card is now ready to go.  If any errors occur during device
1170         * registration we do not fail the whole card but rather proceed only
1171         * with the ports we manage to register successfully.  However we must
1172         * register at least one net device.
1173         */
1174        for (i = 0; i < bi->port_number; ++i) {
1175                err = register_netdev(adapter->port[i].dev);
1176                if (err)
1177                        CH_WARN("%s: cannot register net device %s, skipping\n",
1178                                pci_name(pdev), adapter->port[i].dev->name);
1179                else {
1180                        /*
1181                         * Change the name we use for messages to the name of
1182                         * the first successfully registered interface.
1183                         */
1184                        if (!adapter->registered_device_map)
1185                                adapter->name = adapter->port[i].dev->name;
1186
1187                        __set_bit(i, &adapter->registered_device_map);
1188                }
1189        }
1190        if (!adapter->registered_device_map) {
1191                CH_ERR("%s: could not register any net devices\n",
1192                       pci_name(pdev));
1193                goto out_release_adapter_res;
1194        }
1195
1196        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1197               bi->desc, adapter->params.chip_revision,
1198               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1199               adapter->params.pci.speed, adapter->params.pci.width);
1200
1201        /*
1202         * Set the T1B ASIC and memory clocks.
1203         */
1204        if (t1powersave)
1205                adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
1206        else
1207                adapter->t1powersave = HCLOCK;
1208        if (t1_is_T1B(adapter))
1209                t1_clock(adapter, t1powersave);
1210
1211        return 0;
1212
1213out_release_adapter_res:
1214        t1_free_sw_modules(adapter);
1215out_free_dev:
1216        if (adapter) {
1217                if (adapter->regs)
1218                        iounmap(adapter->regs);
1219                for (i = bi->port_number - 1; i >= 0; --i)
1220                        if (adapter->port[i].dev)
1221                                free_netdev(adapter->port[i].dev);
1222        }
1223        pci_release_regions(pdev);
1224out_disable_pdev:
1225        pci_disable_device(pdev);
1226        pci_set_drvdata(pdev, NULL);
1227        return err;
1228}
1229
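/*
 * Serially shift the low 'nbits' bits of 'bitdata' out on the ELMER0 GPO
 * data pin, most significant bit first, pulsing the clock pin low then high
 * for each bit.  Used by t1_clock() below to program the clock synthesizers.
 */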
1230static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1231{
1232        int data;
1233        int i;
1234        u32 val;
1235
1236        enum {
1237                S_CLOCK = 1 << 3,
1238                S_DATA = 1 << 4
1239        };
1240
1241        for (i = (nbits - 1); i > -1; i--) {
1242
1243                udelay(50);
1244
1245                data = ((bitdata >> i) & 0x1);
1246                __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1247
1248                if (data)
1249                        val |= S_DATA;
1250                else
1251                        val &= ~S_DATA;
1252
1253                udelay(50);
1254
1255                /* Set SCLOCK low */
1256                val &= ~S_CLOCK;
1257                __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1258
1259                udelay(50);
1260
1261                /* Set SCLOCK high */
1262                val |= S_CLOCK;
1263                __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1264
1265        }
1266}
1267
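/*
 * Switch a T1B between full-speed and power-save clocking by reprogramming
 * the core and memory clock synthesizers through ELMER0 GPIO: assert the
 * NP_LOAD pin, bit-bang the T/N/M values, then latch them with S_LOAD_CORE
 * or S_LOAD_MEM.  The divider values used appear to be fixed, hardware-
 * specific constants.
 */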
1268static int t1_clock(struct adapter *adapter, int mode)
1269{
1270        u32 val;
1271        int M_CORE_VAL;
1272        int M_MEM_VAL;
1273
1274        enum {
1275                M_CORE_BITS     = 9,
1276                T_CORE_VAL      = 0,
1277                T_CORE_BITS     = 2,
1278                N_CORE_VAL      = 0,
1279                N_CORE_BITS     = 2,
1280                M_MEM_BITS      = 9,
1281                T_MEM_VAL       = 0,
1282                T_MEM_BITS      = 2,
1283                N_MEM_VAL       = 0,
1284                N_MEM_BITS      = 2,
1285                NP_LOAD         = 1 << 17,
1286                S_LOAD_MEM      = 1 << 5,
1287                S_LOAD_CORE     = 1 << 6,
1288                S_CLOCK         = 1 << 3
1289        };
1290
1291        if (!t1_is_T1B(adapter))
1292                return -ENODEV; /* Can't re-clock this chip. */
1293
1294        if (mode & 2)
1295                return 0;       /* show current mode. */
1296
1297        if ((adapter->t1powersave & 1) == (mode & 1))
1298                return -EALREADY;       /* ASIC already running in mode. */
1299
1300        if ((mode & 1) == HCLOCK) {
1301                M_CORE_VAL = 0x14;
1302                M_MEM_VAL = 0x18;
1303                adapter->t1powersave = HCLOCK;  /* overclock */
1304        } else {
1305                M_CORE_VAL = 0xe;
1306                M_MEM_VAL = 0x10;
1307                adapter->t1powersave = LCLOCK;  /* underclock */
1308        }
1309
1310        /* Don't interrupt this serial stream! */
1311        spin_lock(&adapter->tpi_lock);
1312
1313        /* Initialize for ASIC core */
1314        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1315        val |= NP_LOAD;
1316        udelay(50);
1317        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1318        udelay(50);
1319        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1320        val &= ~S_LOAD_CORE;
1321        val &= ~S_CLOCK;
1322        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1323        udelay(50);
1324
1325        /* Serial program the ASIC clock synthesizer */
1326        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
1327        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
1328        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
1329        udelay(50);
1330
1331        /* Finish ASIC core */
1332        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1333        val |= S_LOAD_CORE;
1334        udelay(50);
1335        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1336        udelay(50);
1337        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1338        val &= ~S_LOAD_CORE;
1339        udelay(50);
1340        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1341        udelay(50);
1342
1343        /* Initialize for memory */
1344        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1345        val |= NP_LOAD;
1346        udelay(50);
1347        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1348        udelay(50);
1349        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1350        val &= ~S_LOAD_MEM;
1351        val &= ~S_CLOCK;
1352        udelay(50);
1353        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1354        udelay(50);
1355
1356        /* Serial program the memory clock synthesizer */
1357        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
1358        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
1359        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
1360        udelay(50);
1361
1362        /* Finish memory */
1363        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1364        val |= S_LOAD_MEM;
1365        udelay(50);
1366        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1367        udelay(50);
1368        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1369        val &= ~S_LOAD_MEM;
1370        udelay(50);
1371        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1372
1373        spin_unlock(&adapter->tpi_lock);
1374
1375        return 0;
1376}
1377
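/*
 * Soft-reset the adapter by cycling it through PCI power state D3hot (3)
 * and back to D0 (0) via its power-management control/status register.
 */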
1378static inline void t1_sw_reset(struct pci_dev *pdev)
1379{
1380        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1381        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1382}
1383
1384static void __devexit remove_one(struct pci_dev *pdev)
1385{
1386        struct net_device *dev = pci_get_drvdata(pdev);
1387        struct adapter *adapter = dev->priv;
1388        int i;
1389
1390        for_each_port(adapter, i) {
1391                if (test_bit(i, &adapter->registered_device_map))
1392                        unregister_netdev(adapter->port[i].dev);
1393        }
1394
1395        t1_free_sw_modules(adapter);
1396        iounmap(adapter->regs);
1397
1398        while (--i >= 0) {
1399                if (adapter->port[i].dev)
1400                        free_netdev(adapter->port[i].dev);
1401        }
1402
1403        pci_release_regions(pdev);
1404        pci_disable_device(pdev);
1405        pci_set_drvdata(pdev, NULL);
1406        t1_sw_reset(pdev);
1407}
1408
1409static struct pci_driver driver = {
1410        .name     = DRV_NAME,
1411        .id_table = t1_pci_tbl,
1412        .probe    = init_one,
1413        .remove   = __devexit_p(remove_one),
1414};
1415
1416static int __init t1_init_module(void)
1417{
1418        return pci_register_driver(&driver);
1419}
1420
1421static void __exit t1_cleanup_module(void)
1422{
1423        pci_unregister_driver(&driver);
1424}
1425
1426module_init(t1_init_module);
1427module_exit(t1_cleanup_module);
1428